Column          dtype     Length (min .. max)
hip_filename    string    5 .. 84
hip_content     string    79 .. 9.69M
cuda_filename   string    4 .. 83
cuda_content    string    19 .. 9.69M
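Each record pairs a hipified source file with the CUDA original it was generated from, keyed by filename. As a minimal sketch of how a consumer might model one row (the struct name and comments are illustrative assumptions, not part of the dataset):

#include <string>

// Hypothetical record type mirroring the four columns above.
struct HipCudaPair {
    std::string hip_filename;   // e.g. "<hash>.hip" (5 to 84 chars)
    std::string hip_content;    // hipified source text (79 B to 9.69 MB)
    std::string cuda_filename;  // e.g. "<hash>.cu" (4 to 83 chars)
    std::string cuda_content;   // original CUDA source text (19 B to 9.69 MB)
};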
d9efa53a65f9717fd41f1b4bbdc51c65092eb56d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ static void ShiftFinalData(float* AfterFFTData, float* ShiftData, int SizeX, int SizeY, int FinalSizeZ, int FinalDataSize)
{
    // Apply the shift here.
    // The hardware lays the data out like this: ↓
    // => | ->
    // ("->" refers to the first segment, "=>" refers to the second segment)
    int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x +  // Y => Y * 250 * (2 * 1024)
             blockIdx.x * gridDim.z * blockDim.x +              // X => X * (2 * 1024)
             blockIdx.z * blockDim.x +                          // Z => (Z1 * 1024 + Z2)
             threadIdx.x;
    if (id >= FinalDataSize)
    {
        printf("Shift Data error!!\n");
        return;
    }

    // This index math must map back to the original data layout.
    int idZ = id % FinalSizeZ;
    int tempIndex = id / FinalSizeZ;
    int idX = tempIndex % SizeX;
    int idY = tempIndex / SizeX;

    // Fold SizeY back:
    // (0 ~ 124, 125 ~ 249)
    //          ↓
    // (125 ~ 249, 0 ~ 124)
    idY = (idY + SizeY / 2) % SizeY;

    int NewIndex = idY * SizeX * FinalSizeZ + idX * FinalSizeZ + idZ;
    ShiftData[id] = AfterFFTData[NewIndex];
    //ShiftData[id] = AfterFFTData[id];
}
d9efa53a65f9717fd41f1b4bbdc51c65092eb56d.cu
#include "includes.h" __global__ static void ShiftFinalData(float* AfterFFTData, float* ShiftData, int SizeX, int SizeY, int FinalSizeZ, int FinalDataSize) { // 這邊要做位移 // 由於硬體是這樣子 ↓ // => | -> // ("->" 是指第一段,"=>" 是指第二段) int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= FinalDataSize) { printf("Shift Data 有錯誤!!\n"); return; } // 這邊的算法要對應回去原本的資料 int idZ = id % FinalSizeZ; int tempIndex = id / FinalSizeZ; int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; // SizeY 折回來 // (0 ~ 124 125 ~ 249) // ↓ // (125 ~ 249 0 ~ 124) idY = (idY + SizeY / 2) % SizeY; int NewIndex = idY * SizeX * FinalSizeZ + idX * FinalSizeZ + idZ; ShiftData[id] = AfterFFTData[NewIndex]; //ShiftData[id] = AfterFFTData[id]; }
a669a93988c4b86d3964b59c02b109e681bd1d06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file bc_app.cu * * @brief Gunrock betweeness centrality (BC) application */ #include <gunrock/gunrock.h> // graph construction utilities #include <gunrock/graphio/market.cuh> // betweeness centrality includes #include <gunrock/app/bc/bc_enactor.cuh> #include <gunrock/app/bc/bc_problem.cuh> #include <gunrock/app/bc/bc_functor.cuh> #include <moderngpu.cuh> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::bc; /** * @brief Test_Parameter structure */ struct Test_Parameter : gunrock::app::TestParameter_Base { public: std::string ref_filename; double max_queue_sizing1; Test_Parameter() { ref_filename = ""; max_queue_sizing1 = -1.0; } ~Test_Parameter() { } }; /** * @brief Graph edge properties (bundled properties) */ struct EdgeProperties { int weight; }; template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void runBC(GRGraph* output, Test_Parameter *parameter); /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam INSTRUMENT Keep kernels statics * @tparam DEBUG Keep debug statics * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG > void RunTests_size_check(GRGraph* output, Test_Parameter *parameter) { if (parameter->size_check) runBC<VertexId, Value, SizeT, INSTRUMENT, DEBUG, true>(output, parameter); else runBC<VertexId, Value, SizeT, INSTRUMENT, DEBUG, false>(output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam INSTRUMENT Keep kernels statics * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT > void RunTests_debug(GRGraph* output, Test_Parameter *parameter) { if (parameter->debug) RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(output, parameter); else RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false> (output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT > void RunTests_instrumented(GRGraph* output, Test_Parameter *parameter) { if (parameter->instrumented) RunTests_debug<VertexId, Value, SizeT, true>(output, parameter); else RunTests_debug<VertexId, Value, SizeT, false>(output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam INSTRUMENT Keep 
kernels statics * @tparam DEBUG Keep debug statics * @tparam SIZE_CHECK Enable size check * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void runBC(GRGraph* output, Test_Parameter *parameter) { typedef BCProblem <VertexId, SizeT, Value, true, // MARK_PREDECESSORS false > BcProblem; // Does not use double buffer typedef BCEnactor <BcProblem, INSTRUMENT, DEBUG, SIZE_CHECK > BcEnactor; Csr<VertexId, Value, SizeT> *graph = (Csr<VertexId, Value, SizeT>*)parameter->graph; VertexId src = (VertexId)parameter -> src; int max_grid_size = parameter -> max_grid_size; int num_gpus = parameter -> num_gpus; double max_queue_sizing = parameter -> max_queue_sizing; double max_queue_sizing1 = parameter -> max_queue_sizing1; double max_in_sizing = parameter -> max_in_sizing; ContextPtr *context = (ContextPtr*)parameter -> context; std::string partition_method = parameter -> partition_method; int *gpu_idx = parameter -> gpu_idx; hipStream_t *streams = parameter -> streams; float partition_factor = parameter -> partition_factor; int partition_seed = parameter -> partition_seed; bool g_stream_from_host = parameter -> g_stream_from_host; size_t *org_size = new size_t [num_gpus]; // Allocate host-side arrays Value *h_sigmas = new Value [graph->nodes]; Value *h_bc_values = new Value [graph->nodes]; Value *h_ebc_values = new Value [graph->edges]; VertexId *h_labels = new VertexId[graph->nodes]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&(org_size[gpu]), &dummy); } BcEnactor* enactor = new BcEnactor(num_gpus, gpu_idx); // BC enactor map BcProblem* problem = new BcProblem; // Allocate problem on GPU util::GRError( problem->Init( g_stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "BC Problem Initialization Failed", __FILE__, __LINE__); util::GRError( enactor->Init(context, problem, max_grid_size), "BC Enactor init failed", __FILE__, __LINE__); // Perform BC CpuTimer cpu_timer; VertexId start_src; VertexId end_src; if (src == -1) { start_src = 0; end_src = graph->nodes; } else { start_src = src; end_src = src + 1; } for (int gpu = 0; gpu < num_gpus; gpu++) { util::SetDevice(gpu_idx[gpu]); hipLaunchKernelGGL(( util::MemsetKernel) , dim3(128), dim3(128) , 0, 0, problem->data_slices[gpu]->bc_values.GetPointer(util::DEVICE), (Value)0.0f, (int)(problem->sub_graphs[gpu].nodes)); } util::GRError( problem->Reset(0, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BC Problem Data Reset Failed", __FILE__, __LINE__); printf("__________________________\n"); fflush(stdout); cpu_timer.Start(); for (VertexId i = start_src; i < end_src; ++i) { util::GRError( problem->Reset(i, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BC Problem Data Reset Failed", __FILE__, __LINE__); util::GRError( enactor ->Reset(), "BC Enactor Reset failed", __FILE__, __LINE__); util::GRError( enactor ->Enact(i), "BC Problem Enact Failed", __FILE__, __LINE__); } for (int gpu = 0; gpu < num_gpus; gpu++) { util::SetDevice(gpu_idx[gpu]); hipLaunchKernelGGL(( util::MemsetScaleKernel) , dim3(128), dim3(128) , 0, 0, problem->data_slices[gpu]->bc_values.GetPointer(util::DEVICE), (Value)0.5f, (int)(problem->sub_graphs[gpu].nodes)); } cpu_timer.Stop(); 
printf("--------------------------\n"); fflush(stdout); float elapsed = cpu_timer.ElapsedMillis(); // Copy out results util::GRError( problem->Extract(h_sigmas, h_bc_values, h_ebc_values, h_labels), "BC Problem Data Extraction Failed", __FILE__, __LINE__); output->node_value1 = (Value*)&h_bc_values[0]; output->edge_value1 = (Value*)&h_ebc_values[0]; printf(" GPU Betweenness Centrality finished in %lf msec.\n", elapsed); // Clean up if (org_size) { delete[] org_size; org_size = NULL; } if (problem ) { delete problem ; problem = NULL; } if (enactor ) { delete enactor ; enactor = NULL; } if (h_sigmas) { delete[] h_sigmas; h_sigmas = NULL; } if (h_labels) { delete[] h_labels; h_labels = NULL; } } /** * @brief Dispatch function to handle configurations * * @param[out] grapho Pointer to output graph structure of the problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Primitive-specific configurations * @param[in] data_t Data type configurations * @param[in] context ModernGPU context * @param[in] streams CUDA stream */ void dispatchBC( GRGraph* grapho, const GRGraph* graphi, const GRSetup config, const GRTypes data_t, ContextPtr* context, hipStream_t* streams) { Test_Parameter* parameter = new Test_Parameter; parameter->context = context; parameter->streams = streams; parameter->num_gpus = config.num_devices; parameter->gpu_idx = config.device_list; switch (data_t.VTXID_TYPE) { case VTXID_INT: { switch (data_t.SIZET_TYPE) { case SIZET_INT: { switch (data_t.VALUE_TYPE) { case VALUE_INT: { // template type = <int, int, int> // not support yet printf("Not Yet Support This DataType Combination.\n"); break; } case VALUE_UINT: { // template type = <int, uint, int> // not support yet printf("Not Yet Support This DataType Combination.\n"); break; } case VALUE_FLOAT: { // template type = <int, float, int> // build input csr format graph Csr<int, int, int> csr(false); csr.nodes = graphi->num_nodes; csr.edges = graphi->num_edges; csr.row_offsets = (int*)graphi->row_offsets; csr.column_indices = (int*)graphi->col_indices; parameter->graph = &csr; // determine source vertex to start switch (config.source_mode) { case randomize: { parameter->src = graphio::RandomNode(csr.nodes); break; } case largest_degree: { int max_deg = 0; parameter->src = csr.GetNodeWithHighestDegree(max_deg); break; } case manually: { parameter->src = config.source_vertex; break; } default: { parameter->src = 0; break; } } printf(" source: %lld\n", (long long) parameter->src); RunTests_instrumented<int, float, int>(grapho, parameter); csr.row_offsets = NULL; csr.column_indices = NULL; break; } } break; } } break; } } } /* * @brief Entry of gunrock_bc function * * @param[out] grapho Pointer to output graph structure of the problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Gunrock primitive specific configurations * @param[in] data_t Gunrock data type structure */ void gunrock_bc( GRGraph *grapho, const GRGraph *graphi, const GRSetup config, const GRTypes data_t) { // GPU-related configurations int num_gpus = 0; int *gpu_idx = NULL; ContextPtr *context = NULL; hipStream_t *streams = NULL; num_gpus = config.num_devices; gpu_idx = new int [num_gpus]; for (int i = 0; i < num_gpus; ++i) { gpu_idx[i] = config.device_list[i]; } // Create streams and MordernGPU context for each GPU streams = new hipStream_t[num_gpus * num_gpus * 2]; context = new ContextPtr[num_gpus * num_gpus]; printf(" using %d GPUs:", num_gpus); for (int gpu = 0; gpu < num_gpus; ++gpu) { 
printf(" %d ", gpu_idx[gpu]); util::SetDevice(gpu_idx[gpu]); for (int i = 0; i < num_gpus * 2; ++i) { int _i = gpu * num_gpus * 2 + i; util::GRError(hipStreamCreate(&streams[_i]), "hipStreamCreate fialed.", __FILE__, __LINE__); if (i < num_gpus) { context[gpu * num_gpus + i] = mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu], streams[_i]); } } } printf("\n"); dispatchBC(grapho, graphi, config, data_t, context, streams); } /* * @brief Simple interface take in CSR arrays as input * * @param[out] bc_scores Return BC node centrality per nodes * @param[in] num_nodes Number of nodes of the input graph * @param[in] num_edges Number of edges of the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[in] source Source to begin traverse/computation */ void bc( float* bc_scores, const int num_nodes, const int num_edges, const int* row_offsets, const int* col_indices, const int source) { struct GRTypes data_t; // primitive-specific data types data_t.VTXID_TYPE = VTXID_INT; // integer vertex identifier data_t.SIZET_TYPE = SIZET_INT; // integer graph size type data_t.VALUE_TYPE = VALUE_FLOAT; // float attributes type struct GRSetup config; // primitive-specific configures int list[] = {0, 1, 2, 3}; // device to run algorithm config.num_devices = sizeof(list) / sizeof(list[0]); // number of devices config.device_list = list; // device list to run algorithm config.source_mode = manually; // manually setting source vertex config.source_vertex = source; // source vertex to start config.max_queue_sizing = 1.0f; // maximum queue sizing factor struct GRGraph *grapho = (struct GRGraph*)malloc(sizeof(struct GRGraph)); struct GRGraph *graphi = (struct GRGraph*)malloc(sizeof(struct GRGraph)); graphi->num_nodes = num_nodes; // setting graph nodes graphi->num_edges = num_edges; // setting graph edges graphi->row_offsets = (void*)&row_offsets[0]; // setting row_offsets graphi->col_indices = (void*)&col_indices[0]; // setting col_indices printf(" loaded %d nodes and %d edges\n", num_nodes, num_edges); gunrock_bc(grapho, graphi, config, data_t); memcpy(bc_scores, (float*)grapho->node_value1, num_nodes * sizeof(float)); if (graphi) free(graphi); if (grapho) free(grapho); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
a669a93988c4b86d3964b59c02b109e681bd1d06.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file bc_app.cu * * @brief Gunrock betweeness centrality (BC) application */ #include <gunrock/gunrock.h> // graph construction utilities #include <gunrock/graphio/market.cuh> // betweeness centrality includes #include <gunrock/app/bc/bc_enactor.cuh> #include <gunrock/app/bc/bc_problem.cuh> #include <gunrock/app/bc/bc_functor.cuh> #include <moderngpu.cuh> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::bc; /** * @brief Test_Parameter structure */ struct Test_Parameter : gunrock::app::TestParameter_Base { public: std::string ref_filename; double max_queue_sizing1; Test_Parameter() { ref_filename = ""; max_queue_sizing1 = -1.0; } ~Test_Parameter() { } }; /** * @brief Graph edge properties (bundled properties) */ struct EdgeProperties { int weight; }; template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void runBC(GRGraph* output, Test_Parameter *parameter); /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam INSTRUMENT Keep kernels statics * @tparam DEBUG Keep debug statics * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG > void RunTests_size_check(GRGraph* output, Test_Parameter *parameter) { if (parameter->size_check) runBC<VertexId, Value, SizeT, INSTRUMENT, DEBUG, true>(output, parameter); else runBC<VertexId, Value, SizeT, INSTRUMENT, DEBUG, false>(output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam INSTRUMENT Keep kernels statics * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT > void RunTests_debug(GRGraph* output, Test_Parameter *parameter) { if (parameter->debug) RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(output, parameter); else RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false> (output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * * @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT > void RunTests_instrumented(GRGraph* output, Test_Parameter *parameter) { if (parameter->instrumented) RunTests_debug<VertexId, Value, SizeT, true>(output, parameter); else RunTests_debug<VertexId, Value, SizeT, false>(output, parameter); } /** * @brief Run test * * @tparam VertexId Vertex identifier type * @tparam Value Attribute type * @tparam SizeT Graph size type * @tparam INSTRUMENT Keep kernels statics * @tparam DEBUG Keep debug statics * @tparam SIZE_CHECK Enable size check * 
* @praam[out] output Pointer to output graph structure of the problem * @param[in] parameter primitive-specific test parameters */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void runBC(GRGraph* output, Test_Parameter *parameter) { typedef BCProblem <VertexId, SizeT, Value, true, // MARK_PREDECESSORS false > BcProblem; // Does not use double buffer typedef BCEnactor <BcProblem, INSTRUMENT, DEBUG, SIZE_CHECK > BcEnactor; Csr<VertexId, Value, SizeT> *graph = (Csr<VertexId, Value, SizeT>*)parameter->graph; VertexId src = (VertexId)parameter -> src; int max_grid_size = parameter -> max_grid_size; int num_gpus = parameter -> num_gpus; double max_queue_sizing = parameter -> max_queue_sizing; double max_queue_sizing1 = parameter -> max_queue_sizing1; double max_in_sizing = parameter -> max_in_sizing; ContextPtr *context = (ContextPtr*)parameter -> context; std::string partition_method = parameter -> partition_method; int *gpu_idx = parameter -> gpu_idx; cudaStream_t *streams = parameter -> streams; float partition_factor = parameter -> partition_factor; int partition_seed = parameter -> partition_seed; bool g_stream_from_host = parameter -> g_stream_from_host; size_t *org_size = new size_t [num_gpus]; // Allocate host-side arrays Value *h_sigmas = new Value [graph->nodes]; Value *h_bc_values = new Value [graph->nodes]; Value *h_ebc_values = new Value [graph->edges]; VertexId *h_labels = new VertexId[graph->nodes]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&(org_size[gpu]), &dummy); } BcEnactor* enactor = new BcEnactor(num_gpus, gpu_idx); // BC enactor map BcProblem* problem = new BcProblem; // Allocate problem on GPU util::GRError( problem->Init( g_stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "BC Problem Initialization Failed", __FILE__, __LINE__); util::GRError( enactor->Init(context, problem, max_grid_size), "BC Enactor init failed", __FILE__, __LINE__); // Perform BC CpuTimer cpu_timer; VertexId start_src; VertexId end_src; if (src == -1) { start_src = 0; end_src = graph->nodes; } else { start_src = src; end_src = src + 1; } for (int gpu = 0; gpu < num_gpus; gpu++) { util::SetDevice(gpu_idx[gpu]); util::MemsetKernel <<< 128, 128 >>> ( problem->data_slices[gpu]->bc_values.GetPointer(util::DEVICE), (Value)0.0f, (int)(problem->sub_graphs[gpu].nodes)); } util::GRError( problem->Reset(0, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BC Problem Data Reset Failed", __FILE__, __LINE__); printf("__________________________\n"); fflush(stdout); cpu_timer.Start(); for (VertexId i = start_src; i < end_src; ++i) { util::GRError( problem->Reset(i, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BC Problem Data Reset Failed", __FILE__, __LINE__); util::GRError( enactor ->Reset(), "BC Enactor Reset failed", __FILE__, __LINE__); util::GRError( enactor ->Enact(i), "BC Problem Enact Failed", __FILE__, __LINE__); } for (int gpu = 0; gpu < num_gpus; gpu++) { util::SetDevice(gpu_idx[gpu]); util::MemsetScaleKernel <<< 128, 128 >>> ( problem->data_slices[gpu]->bc_values.GetPointer(util::DEVICE), (Value)0.5f, (int)(problem->sub_graphs[gpu].nodes)); } cpu_timer.Stop(); printf("--------------------------\n"); fflush(stdout); float elapsed = cpu_timer.ElapsedMillis(); // Copy out results util::GRError( problem->Extract(h_sigmas, h_bc_values, h_ebc_values, 
h_labels), "BC Problem Data Extraction Failed", __FILE__, __LINE__); output->node_value1 = (Value*)&h_bc_values[0]; output->edge_value1 = (Value*)&h_ebc_values[0]; printf(" GPU Betweenness Centrality finished in %lf msec.\n", elapsed); // Clean up if (org_size) { delete[] org_size; org_size = NULL; } if (problem ) { delete problem ; problem = NULL; } if (enactor ) { delete enactor ; enactor = NULL; } if (h_sigmas) { delete[] h_sigmas; h_sigmas = NULL; } if (h_labels) { delete[] h_labels; h_labels = NULL; } } /** * @brief Dispatch function to handle configurations * * @param[out] grapho Pointer to output graph structure of the problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Primitive-specific configurations * @param[in] data_t Data type configurations * @param[in] context ModernGPU context * @param[in] streams CUDA stream */ void dispatchBC( GRGraph* grapho, const GRGraph* graphi, const GRSetup config, const GRTypes data_t, ContextPtr* context, cudaStream_t* streams) { Test_Parameter* parameter = new Test_Parameter; parameter->context = context; parameter->streams = streams; parameter->num_gpus = config.num_devices; parameter->gpu_idx = config.device_list; switch (data_t.VTXID_TYPE) { case VTXID_INT: { switch (data_t.SIZET_TYPE) { case SIZET_INT: { switch (data_t.VALUE_TYPE) { case VALUE_INT: { // template type = <int, int, int> // not support yet printf("Not Yet Support This DataType Combination.\n"); break; } case VALUE_UINT: { // template type = <int, uint, int> // not support yet printf("Not Yet Support This DataType Combination.\n"); break; } case VALUE_FLOAT: { // template type = <int, float, int> // build input csr format graph Csr<int, int, int> csr(false); csr.nodes = graphi->num_nodes; csr.edges = graphi->num_edges; csr.row_offsets = (int*)graphi->row_offsets; csr.column_indices = (int*)graphi->col_indices; parameter->graph = &csr; // determine source vertex to start switch (config.source_mode) { case randomize: { parameter->src = graphio::RandomNode(csr.nodes); break; } case largest_degree: { int max_deg = 0; parameter->src = csr.GetNodeWithHighestDegree(max_deg); break; } case manually: { parameter->src = config.source_vertex; break; } default: { parameter->src = 0; break; } } printf(" source: %lld\n", (long long) parameter->src); RunTests_instrumented<int, float, int>(grapho, parameter); csr.row_offsets = NULL; csr.column_indices = NULL; break; } } break; } } break; } } } /* * @brief Entry of gunrock_bc function * * @param[out] grapho Pointer to output graph structure of the problem * @param[in] graphi Pointer to input graph we need to process on * @param[in] config Gunrock primitive specific configurations * @param[in] data_t Gunrock data type structure */ void gunrock_bc( GRGraph *grapho, const GRGraph *graphi, const GRSetup config, const GRTypes data_t) { // GPU-related configurations int num_gpus = 0; int *gpu_idx = NULL; ContextPtr *context = NULL; cudaStream_t *streams = NULL; num_gpus = config.num_devices; gpu_idx = new int [num_gpus]; for (int i = 0; i < num_gpus; ++i) { gpu_idx[i] = config.device_list[i]; } // Create streams and MordernGPU context for each GPU streams = new cudaStream_t[num_gpus * num_gpus * 2]; context = new ContextPtr[num_gpus * num_gpus]; printf(" using %d GPUs:", num_gpus); for (int gpu = 0; gpu < num_gpus; ++gpu) { printf(" %d ", gpu_idx[gpu]); util::SetDevice(gpu_idx[gpu]); for (int i = 0; i < num_gpus * 2; ++i) { int _i = gpu * num_gpus * 2 + i; util::GRError(cudaStreamCreate(&streams[_i]), 
"cudaStreamCreate fialed.", __FILE__, __LINE__); if (i < num_gpus) { context[gpu * num_gpus + i] = mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu], streams[_i]); } } } printf("\n"); dispatchBC(grapho, graphi, config, data_t, context, streams); } /* * @brief Simple interface take in CSR arrays as input * * @param[out] bc_scores Return BC node centrality per nodes * @param[in] num_nodes Number of nodes of the input graph * @param[in] num_edges Number of edges of the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[in] source Source to begin traverse/computation */ void bc( float* bc_scores, const int num_nodes, const int num_edges, const int* row_offsets, const int* col_indices, const int source) { struct GRTypes data_t; // primitive-specific data types data_t.VTXID_TYPE = VTXID_INT; // integer vertex identifier data_t.SIZET_TYPE = SIZET_INT; // integer graph size type data_t.VALUE_TYPE = VALUE_FLOAT; // float attributes type struct GRSetup config; // primitive-specific configures int list[] = {0, 1, 2, 3}; // device to run algorithm config.num_devices = sizeof(list) / sizeof(list[0]); // number of devices config.device_list = list; // device list to run algorithm config.source_mode = manually; // manually setting source vertex config.source_vertex = source; // source vertex to start config.max_queue_sizing = 1.0f; // maximum queue sizing factor struct GRGraph *grapho = (struct GRGraph*)malloc(sizeof(struct GRGraph)); struct GRGraph *graphi = (struct GRGraph*)malloc(sizeof(struct GRGraph)); graphi->num_nodes = num_nodes; // setting graph nodes graphi->num_edges = num_edges; // setting graph edges graphi->row_offsets = (void*)&row_offsets[0]; // setting row_offsets graphi->col_indices = (void*)&col_indices[0]; // setting col_indices printf(" loaded %d nodes and %d edges\n", num_nodes, num_edges); gunrock_bc(grapho, graphi, config, data_t); memcpy(bc_scores, (float*)grapho->node_value1, num_nodes * sizeof(float)); if (graphi) free(graphi); if (grapho) free(grapho); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
c0366e2c51ea0526f86ea0fecd254c536214dc76.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #define N 100 #define DIM 2 #define PamM 2e-11 #define S 0.5 char le_entrada(); char inicializa_parametros(); float *aloca_matriz(int, int); void cal_cond_robin(); char parametro_independentes(); char copia_dados_para_gpu(); void copia_dados_para_cpu(); void clear_mem(); //char calcula_pressao_velocidade(int, int, int, int, int); //char atualiza_mult_lagrange(int tid); static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} //- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - // /* - - - - - - - Estruturas - - - - - - - */ typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_Q; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_L; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_B; typedef struct{ float *p, *p_old; }ESTRUTURA_PRESSAO; typedef struct{ float *perm, *font, *epsilon; }ESTRUTURA_MAT; /* - - - - - - - Fim das Estruturas - - - - - - - */ /* - - - - - - - Variaveis das Estruturas - - - - - - - */ ESTRUTURA_Q host_q, dev_q; ESTRUTURA_L host_l, dev_l; ESTRUTURA_B host_b, dev_b; ESTRUTURA_PRESSAO host_pressao, dev_pressao; ESTRUTURA_MAT host_mat, dev_mat; /* - - - - - - - Entradas Externas - - - - - - - */ int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1; float tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00; float h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA //float *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; //float *dev_mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; /* - - - - - - - Fim das Entradas Externas - - - - - - - */ /* - - - - - - - Fim das Variaveis das Estruturas - - - - - - - */ /* - - - - - - - Ponteiros para GPU - - - - - - - */ float *host_aux_1 = NULL, *dev_aux_1 = NULL, *dev_aux_2 = NULL, dev_erro = NULL, *dev_media = NULL; // float *dev_aux_1 = NULL, dev_erro = 0.0, dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0; // // float *dev_q.R = NULL, *dev_q.L = NULL, *dev_q.U = NULL, *dev_q.D = NULL; // float *dev_q.R_old = NULL, *dev_q.L_old = NULL, *dev_q.U_old = NULL, *dev_q.D_old = NULL; // // float *dev_l.R = NULL, *dev_l.L = NULL, *dev_l.U = NULL, *dev_l.D = NULL; // float *dev_l.R_old = NULL, *dev_l.L_old = NULL, *dev_l.U_old = NULL, *dev_l.D_old = NULL; // // float *dev_b.R = NULL, *dev_b.L = NULL, *dev_b.U = NULL, *dev_b.D = NULL; // float *dev_b.R_old = NULL, *dev_b.L_old = NULL, *dev_b.U_old = NULL, *dev_b.D_old = NULL; // // float *dev_pressao.p = NULL, *dev_pressao.p_old = NULL; // //- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - // __device__ char atualiza_mult_lagrange( int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b ){ int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto 
a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; index_mem_uper = index_mem_central - offset; // (offset -1) = comprimento do kernel index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; dev_l.U[index_mem_central] = dev_b.U[index_mem_central] * (dev_q.U[index_mem_central] + dev_q.D_old[index_mem_uper]) + dev_l.D_old[index_mem_uper]; dev_l.D[index_mem_central] = dev_b.D[index_mem_central] * (dev_q.D[index_mem_central] + dev_q.U_old[index_mem_down]) + dev_l.U_old[index_mem_down]; dev_l.R[index_mem_central] = dev_b.R[index_mem_central] * (dev_q.R[index_mem_central] + dev_q.L_old[index_mem_right]) + dev_l.L_old[index_mem_right]; dev_l.L[index_mem_central] = dev_b.L[index_mem_central] * (dev_q.L[index_mem_central] + dev_q.R_old[index_mem_left]) + dev_l.R_old[index_mem_left]; return 0; } __device__ char calcula_pressao_velocidade( int tid, int uper, int right, int down, int left, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat ){ float auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0; int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; index_mem_uper = index_mem_central - offset; index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; if(uper == 1){ auxU = dev_mat.epsilon[index_mem_central] / (1 + dev_b.U[index_mem_central] * dev_mat.epsilon[index_mem_central]); DU = auxU * (dev_b.U[index_mem_central] * dev_q.D_old[index_mem_uper] + dev_l.D_old[index_mem_uper]); } if(right == 1){ auxR = dev_mat.epsilon[index_mem_central] / (1 + dev_b.R[index_mem_central] * dev_mat.epsilon[index_mem_central]); DR = auxR * (dev_b.R[index_mem_central] * dev_q.L_old[index_mem_right] + dev_l.L_old[index_mem_right]); } if(down == 1){ auxD = dev_mat.epsilon[index_mem_central] / (1 + dev_b.D[index_mem_central] * dev_mat.epsilon[index_mem_central]); DD = auxD * (dev_b.D[index_mem_central] * dev_q.U_old[index_mem_down] + dev_l.U_old[index_mem_down]); } if(left == 1){ auxL = dev_mat.epsilon[index_mem_central] / (1 + dev_b.L[index_mem_central] * dev_mat.epsilon[index_mem_central]); DL = auxL * (dev_b.L[index_mem_central] * dev_q.R_old[index_mem_left] + dev_l.R_old[index_mem_left]); } dev_pressao.p[index_mem_central] = (dev_mat.font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL); dev_q.L[index_mem_central] = auxL * dev_pressao.p[index_mem_central] - DL; dev_q.R[index_mem_central] = auxR * dev_pressao.p[index_mem_central] - DR; dev_q.U[index_mem_central] = auxU * dev_pressao.p[index_mem_central] - DU; dev_q.D[index_mem_central] = auxD * dev_pressao.p[index_mem_central] - DD; return 0; } __global__ void reduce1(float *g_idata, float *g_odata, int n) { __shared__ float sdata[64]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reducao(float *in){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; //efetuando a reduo while(i != 0){ if(tid < i) in[tid] += in[tid + i]; if(i % 2 == 1){ if(i>1) in[0] += in[i-1]; } __syncthreads(); i /= 2; } } __global__ void reducao2(float *in_1, float *in_2){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; while(i != 0){ if(tid < i) in_1[tid] += in_1[tid + i]; in_2[tid] += in_2[tid + i]; if(i % 2 == 1){ if(i>1) in_1[0] += in_1[i-1]; in_2[0] += in_2[i-1]; } __syncthreads(); i /= 2; } } __global__ void escoamento_monofasico( ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat, float *dev_aux_1, const float erro_max, float dev_erro, float *dev_media){ /*int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; a[offset] = offset;*/ /*vificar as condies de contorno*/ int flag_thread_centrais = 1; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; /*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da regio (tam_regiao = n + 2) */ int tid = x + y * blockDim.x * gridDim.x; //verificar esse deslocamento para n causar problema (somente na hora de armazenar utilizar o deslocamento) //int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid fornece o indice do vetor int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int eq_tid_cant_sup_esq = dimensao_x + 1; int eq_tid_cant_sup_dir = dimensao_x + (dimensao_x - 2); // posio extremo sup direito int eq_tid_cant_inf_dir = (dimensao_x * dimensao_y) - (dimensao_x + 2); // posio extremo inf direito int eq_tid_cant_inf_esq = ((dimensao_x) * (dimensao_y - 2)) + 1; // posio extremo inf esquerdo // int offset = (blockDim.x * gridDim.x) + 1 + 2; // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado int index_mem_central = tid; if(tid == eq_tid_cant_sup_esq){//canto superior esquerdo /*VERIFICAR AS CONDIES DE CONTORNO*/ /* * calcula_pressao_velocidade(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat * */ calcula_pressao_velocidade( tid, 0, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); /* * * atualiza_mult_lagrange(); * * param: int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b * */ atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_sup_dir){//canto superior direito /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == 
eq_tid_cant_inf_esq){//canto inferior esquerdo /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_inf_dir){//canto inferior direito /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_sup_dir)){//fronteira superior /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == dimensao_x - 2)){ //fronteira direita /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //fronteira inferior /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_x == 1)){//fronteira esquerda /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(flag_thread_centrais && (tid % dimensao_x >= 2) && (tid % dimensao_x <= (dimensao_x - 3)) && (tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_esq) ){ /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); } //dev_media[tid] = dev_pressao.p[tid]; /* dev_media[0] = reducao(dev_media, 100); dev_media[0] = dev_media[0] / (dimensao_x * dimensao_y); dev_pressao.p[index_mem_central] -= dev_media[0]; dev_l.D[index_mem_central] -= dev_media[0]; dev_l.U[index_mem_central] -= dev_media[0]; dev_l.L[index_mem_central] -= dev_media[0]; dev_l.R[index_mem_central] -= dev_media[0];*/ //avaliando criterio de convergencia /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] - dev_pressao.p_old[index_mem_central]; __syncthreads(); dev_aux_1[index_mem_central] = dev_aux_1[index_mem_central] * dev_aux_1[index_mem_central]; __syncthreads();*/ //reduo da primeira soma sum1 /*dev_sum1 = reducao(dev_aux_1, 100);*/ //reduo da segunda soma sum2 /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] * dev_pressao.p[index_mem_central]; __syncthreads(); dev_sum2 = reducao(dev_aux_1, 100); dev_erro = sqrt(dev_sum1 / dev_sum2);*/ //DUVIDA PARA COMO O SINAL DO ERRO /*if (dev_erro > erro_max){ return; } dev_pressao.p_old[index_mem_central] = dev_pressao.p[index_mem_central]; dev_q.U_old[index_mem_central] = dev_q.U[index_mem_central]; dev_q.R_old[index_mem_central] = dev_q.R[index_mem_central]; dev_q.L_old[index_mem_central] = dev_q.L[index_mem_central]; dev_q.D_old[index_mem_central] = dev_q.D[index_mem_central]; dev_l.D_old[index_mem_central] = dev_l.D[index_mem_central]; dev_l.U_old[index_mem_central] = dev_l.U[index_mem_central]; 
dev_l.L_old[index_mem_central] = dev_l.L[index_mem_central]; dev_l.R_old[index_mem_central] = dev_l.R[index_mem_central]; i++; }*/ } __global__ void perapara_criterio_convergencia(float *dev_aux_1, float *dev_aux_2, float *dev_media, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_PRESSAO dev_pressao){ // sum1 = 0.; // sum2 = 0.; // for (k=1; k<=n; k++) // for (j=1; j<=n; j++) // { // aux = p[j][k] - p_old[j][k]; // sum1 += aux*aux; // sum2 += p[j][k]*p[j][k]; // } // erro = sqrt(sum1/sum2); //float dev_sum1 = 0.0, dev_sum2 = 0.0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; // dimensao_x -= 2; // dimensao_y -= 2; float media = (dev_media[0] + dev_media[1]) / ((dimensao_x-2) * (dimensao_y-2)); int canto_sup_dir = dimensao_x - 1; int canto_inf_dir = (dimensao_x * dimensao_y ) - 1; int canto_inf_esq = (dimensao_x * dimensao_y ) - (dimensao_x - 1); dev_pressao.p[tid] -= media; dev_l.D[tid] -= media; dev_l.U[tid] -= media; dev_l.L[tid] -= media; dev_l.R[tid] -= media; dev_aux_1[tid] = dev_pressao.p[tid] - dev_pressao.p_old[tid]; dev_aux_1[tid] = dev_aux_1[tid] * dev_aux_1[tid]; dev_aux_2[tid] = dev_pressao.p[tid] * dev_pressao.p[tid]; //despresando valores do contorno if((tid >= 0) && (tid <= canto_sup_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid <= canto_inf_esq) && (tid % dimensao_x == 0)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid < canto_inf_esq) && (tid % dimensao_x == (dimensao_x -1))){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid >= canto_inf_esq) && (tid <= canto_inf_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } /*Media zero nas pressoes e multiplicadores de lagrange*/ // dev_pressao.p_old[tid] = dev_pressao.p[tid]; // dev_q.U_old[tid] = dev_q.U[tid]; // dev_q.R_old[tid] = dev_q.R[tid]; // dev_q.L_old[tid] = dev_q.L[tid]; // dev_q.D_old[tid] = dev_q.D[tid]; // // dev_l.D_old[tid] = dev_l.D[tid]; // dev_l.U_old[tid] = dev_l.U[tid]; // dev_l.L_old[tid] = dev_l.L[tid]; // dev_l.R_old[tid] = dev_l.R[tid]; } __global__ void teste( ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l ){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; //dev_pressao.p_old[tid] = dev_pressao.p[tid];hipMemcpy__ dev_q.U_old[tid] = dev_q.U[tid]; dev_q.R_old[tid] = dev_q.R[tid]; dev_q.L_old[tid] = dev_q.L[tid]; dev_q.D_old[tid] = dev_q.D[tid]; dev_l.D_old[tid] = dev_l.D[tid]; dev_l.U_old[tid] = dev_l.U[tid]; dev_l.L_old[tid] = dev_l.L[tid]; dev_l.R_old[tid] = dev_l.R[tid]; } int main(void){ le_entrada(); inicializa_parametros(); cal_cond_robin(); parametro_independentes(); copia_dados_para_gpu(); // dim3 block(comprimento/16 , altura/16); // dim3 thread(16, 16); dim3 block(2, 2); dim3 thread(5, 5); /* * escoamento_monofasico(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, 
ESTRUTURA_MAT dev_mat, float *dev_aux_1, const float erro_max * */ int i = 0, j = 0; while (i < 3){ hipLaunchKernelGGL(( escoamento_monofasico), dim3(block), dim3(thread), 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat, dev_aux_1, 1e-5, dev_erro, dev_media); hipDeviceSynchronize(); // HANDLE_ERROR( hipMemcpy( dev_media, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), // hipMemcpyDeviceToDevice ) ); // // hipDeviceSynchronize(); hipLaunchKernelGGL(( reduce1), dim3(4), dim3(64), 0, 0, dev_pressao.p, dev_media, 100 );//verificar a melhor escolha para o kernel hipDeviceSynchronize(); HANDLE_ERROR( hipMemcpy( host_b.R, dev_media, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); hipDeviceSynchronize(); int k; float soma = 0.0; for(k = 0; k < 4; k++) soma += host_b.R[k]; soma = soma / (8*8); float teste = (float)(host_b.R[0] + host_b.R[1]) / (tam_mat_interna * tam_mat_interna); HANDLE_ERROR( hipMemcpy( host_b.R, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\ti = %d - matriz pressao sem a media\n", i); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%11.3E ", host_b.R[k*tam_mat_real + j]); printf("\n"); } hipLaunchKernelGGL(( perapara_criterio_convergencia), dim3(block), dim3(thread), 0, 0, dev_aux_1, dev_aux_2, dev_media, dev_q, dev_l, dev_pressao); printf("\n\n\t\t\t\ti = %d - dentro: soma/64: %f\t teste/64: %f\n", i, soma, teste); printf("\n\n\t\t\t\tmatriz media\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[k*tam_mat_real + j]); printf("\n"); } HANDLE_ERROR( hipMemcpy( host_b.R, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\tmatriz pressao com a media\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%11.3E ", host_b.R[k*tam_mat_real + j]); printf("\n"); } //reducao2<<<block, thread>>>( dev_aux_1, dev_aux_2); // reducao<<<block, thread>>>( dev_aux_1 ); // // hipDeviceSynchronize(); // // reducao<<<block, thread>>>( dev_aux_2 ); // // // hipDeviceSynchronize(); //teste<<<block, thread>>>( dev_pressao, dev_q, dev_l ); HANDLE_ERROR( hipMemcpy( dev_pressao.p_old, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.U_old, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.R_old, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.D_old, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.L_old, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.U_old, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.R_old, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.D_old, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.L_old, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToDevice ) ); //hipDeviceSynchronize(); HANDLE_ERROR( hipMemset( dev_media, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMemset( dev_aux_1, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMemset( 
dev_aux_2, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); hipDeviceSynchronize(); i++; } copia_dados_para_cpu(); printf("\ntam_mat_interna = %d\n", tam_mat_interna); printf("tam_mat_real = %d\n", tam_mat_real); printf("max_interacoes = %d\n", max_interacoes); printf("op_contorno = %d\n", op_contorno); printf("tam_regiao = %f\n", tam_regiao); printf("erro_max = %f\n", erro_max); printf("valor_contor = %f\n", valor_contor); /* printf("\n\n\t\t\tmat_font:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_mat.font[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_perm:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]); //printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_epsilon:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_mat.epsilon[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\tbeta U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D[i*tam_mat_real + j]); printf("\n"); }*/ printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]); printf("\n"); } printf("\npressao old:\n"); 
printf("\n\n\t\t\t\tpressao old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]); printf("\n"); } /*printf("\n\n\t\t\t\tb_U:\t\t\t\t\t\t\t\t\tb_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_R:\t\t\t\t\t\t\t\t\tb_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\t\t\t\t\t\t\t\t\tpressao_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tl_U:\t\t\t\t\t\t\t\t\tl_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\t\t\t\t\t\t\t\t\tl_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\t\t\t\t\t\t\t\t\tl_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\t\t\t\t\t\t\t\t\tl_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\t\t\t\t\t\t\t\t\tq_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) 
printf("%12.4E ", host_q.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\t\t\t\t\t\t\t\t\tq_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\t\t\t\t\t\t\t\t\tq_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\t\t\t\t\t\t\t\t\tq_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L_old[i*tam_mat_real + j]); printf("\n"); }*/ clear_mem(); // // system("pause"); return 0; } char le_entrada(){ printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n"); FILE *arq = NULL; //arq = fopen("../dir_entrada/parametro_entrada.txt", "r"); arq = fopen("parametro_entrada.txt", "r"); if(arq == NULL){ printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n"); exit(1); } else{ printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n"); /*char c[2], dados[255], buffer[255];*/ char buffer[255]; int cont = 1; while(cont < 9){ fscanf(arq, "%s", buffer); //puts(buffer); int i = 0, j = 0; switch(strlen(buffer)){ case 8: //erro_maximo fscanf(arq, "%f", &erro_max); break; case 10: //tam_regiao fscanf(arq, "%f", &tam_regiao); break; case 11: //opcao_contorno fscanf(arq, "%d", &op_contorno); break; case 12: //valor_contor fscanf(arq, "%f", &valor_contor); break; case 14: //max_interacoes fscanf(arq, "%d", &max_interacoes); break; case 15: //tam_mat_interna fscanf(arq, "%d", &tam_mat_interna); break; case 16: //matriz_de_fontes //uso (tam_mat_interna + 2) - pois ainda no inicializei 'tam_mat_real' host_mat.font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.font[i*(tam_mat_interna+2) + j]); break; case 18: //matriz_permeabilidade host_mat.perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); host_mat.epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.perm[i*(tam_mat_interna+2) + j]); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) host_mat.perm[i*(tam_mat_interna+2) + j] = PamM*exp(S * host_mat.perm[i*(tam_mat_interna+2) + j]); break; default: printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n"); return 0; } //int tam = strlen(buffer); cont++; } printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n"); } printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n"); return 1; } float *aloca_matriz(int L, int C){ float *aux = NULL; aux = (float *) calloc(L * C, sizeof(float)); if(aux == NULL){ printf("\n\n\t\tErro ao alocar memoria\n\n"); exit(1); }else{ return (aux); } return NULL; } /* * *VERIFICAR RETORNO * */ void cal_cond_robin(){ float keff = 0.0, numerador = 0.0, denominador = 0.0; float C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0 //Canto superior esquerdo numerador = ( 2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[tam_mat_real + 2] ); 
denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[tam_mat_real + 2] ); keff = numerador / denominador; host_b.R[tam_mat_real + 1] = C*h/keff; numerador = (2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[(2*tam_mat_real) + 1]); denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[(2*tam_mat_real) + 1]); keff = numerador / denominador; host_b.D[tam_mat_real + 1] = C*h/keff; //Canto superior direito numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[tam_mat_real + tam_mat_interna] = C*h/keff; //Canto infeior esquerdo numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); keff = numerador / denominador; host_b.R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; //Canto infeior direito numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; //Calculo das fronteiras e regio interna para betas int i = 0; for(i = 2; i < tam_mat_interna; i ++){ //Calcula fronteira superior numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i-1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i-1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i+1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i+1)] ); keff = numerador / denominador; host_b.R[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[(2 * tam_mat_real) + i] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[(2 * tam_mat_real) + i] ); keff = numerador / denominador; host_b.D[tam_mat_real + i] = 
C*h/keff; //Calcula fronteira esquerda numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i - 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i - 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[(i * tam_mat_real) + 2] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[(i * tam_mat_real) + 2] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i + 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i + 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + 1] = C*h/keff; //Calcula fronteira inferior numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); keff = numerador / denominador; host_b.U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); keff = numerador / denominador; host_b.R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; //Calcula fronteira direita numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; //Calcula dados internos int j = 0; for(j = 2; j < tam_mat_interna; j ++){ numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j + 1)] ); denominador = ( 
host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j + 1)] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i - 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i - 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i + 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i + 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + j] = C*h/keff; } } } /* * *VERIFICAR RETORNO * */ char parametro_independentes(){ int i = 0, j = 0; float constante = 2/h; for(i = 0; i < tam_mat_real; i ++) for(j = 0; j < tam_mat_real; j++){ host_mat.epsilon[i*tam_mat_real + j] = constante * host_mat.perm[i*tam_mat_real + j]; host_mat.font[i*tam_mat_real + j] *= h; } return 0; } char copia_dados_para_gpu(){ HANDLE_ERROR( hipMemcpy( dev_q.R, host_q.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.L, host_q.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.U, host_q.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.D, host_q.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.R_old, host_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.L_old, host_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.U_old, host_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.D_old, host_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.R, host_l.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.L, host_l.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.U, host_l.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.D, host_l.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.R_old, host_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.L_old, host_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.U_old, host_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.D_old, host_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.R, host_b.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.L, host_b.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.U, host_b.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.D, host_b.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.R_old, host_b.R_old, tam_mat_real * tam_mat_real * 
sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.L_old, host_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.U_old, host_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.D_old, host_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_pressao.p, host_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_pressao.p_old, host_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_mat.perm, host_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_mat.epsilon, host_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_mat.font, host_mat.font, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); return 0; } void copia_dados_para_cpu(){ HANDLE_ERROR( hipMemcpy( host_q.R, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.L, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.U, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.D, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.R_old, dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.L_old, dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.U_old, dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.D_old, dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.R, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.L, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.U, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.D, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.R_old, dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.L_old, dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.U_old, dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.D_old, dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.R, dev_b.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.L, dev_b.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.U, dev_b.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.D, dev_b.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.R_old, dev_b.R_old, tam_mat_real * 
tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.L_old, dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.U_old, dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.D_old, dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_pressao.p, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_pressao.p_old, dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_mat.font, dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_mat.perm, dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_mat.epsilon, dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); } char inicializa_parametros(){ printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n"); /* * * * CONTRUIR FUNCAO PARA VERIFICAR ERRO DE ALOCAO * VERIFICAR RETORNO */ tam_mat_real = tam_mat_interna + 2; h = tam_regiao / tam_mat_interna; HANDLE_ERROR( hipMalloc( (void**)&dev_q, sizeof(ESTRUTURA_Q) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_l, sizeof(ESTRUTURA_L) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b, sizeof(ESTRUTURA_B) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_pressao, sizeof(ESTRUTURA_PRESSAO) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat, sizeof(ESTRUTURA_MAT) ) ); host_q.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D = 
aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_aux_1, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_aux_2, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMemset( dev_aux_1, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMemset( dev_aux_2, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&erro_max, sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_erro, sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_media, tam_mat_real * tam_mat_real * sizeof(float)) ); host_aux_1 = aloca_matriz(tam_mat_real, 
tam_mat_real);
	int i = 0;
	switch(op_contorno){
		case 1: // Initialize top boundary
			for(i = 0; i < tam_mat_real; i++){
				host_q.D[i] = valor_contor;
				host_q.D_old[i] = valor_contor;
			}
			break;
		case 2: // Initialize left boundary
			for(i = 0; i < tam_mat_real; i++){
				host_q.R[i*tam_mat_real] = valor_contor;
				host_q.R_old[i*tam_mat_real] = valor_contor;
			}
			break;
		case 3: // Initialize right boundary
			for(i = 0; i < tam_mat_real; i++){
				host_q.L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
				host_q.L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
			}
			break;
		case 4: // Initialize bottom boundary
			for(i = 0; i < tam_mat_real; i++){
				host_q.L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
				host_q.L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
			}
			break;
		default:
			printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
			break;
	}

	printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
	return 1;
}

void clear_mem(){
	HANDLE_ERROR( hipFree (dev_q.U));
	HANDLE_ERROR( hipFree (dev_q.R));
	HANDLE_ERROR( hipFree (dev_q.D));
	HANDLE_ERROR( hipFree (dev_q.L));
	free(host_q.U);
	free(host_q.R);
	free(host_q.D);
	free(host_q.L);

	HANDLE_ERROR( hipFree (dev_l.U));
	HANDLE_ERROR( hipFree (dev_l.R));
	HANDLE_ERROR( hipFree (dev_l.D));
	HANDLE_ERROR( hipFree (dev_l.L));
	free(host_l.U);
	free(host_l.R);
	free(host_l.D);
	free(host_l.L);

	HANDLE_ERROR( hipFree (dev_b.U));
	HANDLE_ERROR( hipFree (dev_b.R));
	HANDLE_ERROR( hipFree (dev_b.D));
	HANDLE_ERROR( hipFree (dev_b.L));
	free(host_b.U);
	free(host_b.R);
	free(host_b.D);
	free(host_b.L);

	HANDLE_ERROR( hipFree (dev_pressao.p));
	HANDLE_ERROR( hipFree (dev_pressao.p_old));
	free(host_pressao.p);
	free(host_pressao.p_old);

	HANDLE_ERROR( hipFree (dev_mat.perm));
	HANDLE_ERROR( hipFree (dev_mat.font));
	HANDLE_ERROR( hipFree (dev_mat.epsilon));
	free(host_mat.perm);
	free(host_mat.font);
	free(host_mat.epsilon);
}
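
/* Usage sketch (added note, not part of the original sources; the build
 * invocations and output names are assumptions about a standard ROCm/CUDA
 * setup, not something these files document):
 *
 *     hipcc <this_hip_translation>.cpp -o escoamento_hip
 *     nvcc  c0366e2c51ea0526f86ea0fecd254c536214dc76.cu -o escoamento_cuda
 *
 * Either binary expects "parametro_entrada.txt" in the working directory; it
 * is a keyword/value file read by le_entrada(), with the entries erro_maximo,
 * tam_regiao, opcao_contorno, valor_contor, max_interacoes, tam_mat_interna,
 * matriz_de_fontes and matriz_permeabilidade (dispatch is by keyword length).
 * Note that main() hard-codes dim3 block(2, 2) and dim3 thread(5, 5), i.e. a
 * 10x10 thread grid, so the kernels implicitly assume tam_mat_real = 10
 * (tam_mat_interna = 8, which also matches the soma / (8*8) average). */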
c0366e2c51ea0526f86ea0fecd254c536214dc76.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cuda.h> #include <curand_kernel.h> #include <cuda_runtime.h> #define N 100 #define DIM 2 #define PamM 2e-11 #define S 0.5 char le_entrada(); char inicializa_parametros(); float *aloca_matriz(int, int); void cal_cond_robin(); char parametro_independentes(); char copia_dados_para_gpu(); void copia_dados_para_cpu(); void clear_mem(); //char calcula_pressao_velocidade(int, int, int, int, int); //char atualiza_mult_lagrange(int tid); static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} //- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - // /* - - - - - - - Estruturas - - - - - - - */ typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_Q; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_L; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_B; typedef struct{ float *p, *p_old; }ESTRUTURA_PRESSAO; typedef struct{ float *perm, *font, *epsilon; }ESTRUTURA_MAT; /* - - - - - - - Fim das Estruturas - - - - - - - */ /* - - - - - - - Variaveis das Estruturas - - - - - - - */ ESTRUTURA_Q host_q, dev_q; ESTRUTURA_L host_l, dev_l; ESTRUTURA_B host_b, dev_b; ESTRUTURA_PRESSAO host_pressao, dev_pressao; ESTRUTURA_MAT host_mat, dev_mat; /* - - - - - - - Entradas Externas - - - - - - - */ int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1; float tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00; float h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA //float *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; //float *dev_mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; /* - - - - - - - Fim das Entradas Externas - - - - - - - */ /* - - - - - - - Fim das Variaveis das Estruturas - - - - - - - */ /* - - - - - - - Ponteiros para GPU - - - - - - - */ float *host_aux_1 = NULL, *dev_aux_1 = NULL, *dev_aux_2 = NULL, dev_erro = NULL, *dev_media = NULL; // float *dev_aux_1 = NULL, dev_erro = 0.0, dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0; // // float *dev_q.R = NULL, *dev_q.L = NULL, *dev_q.U = NULL, *dev_q.D = NULL; // float *dev_q.R_old = NULL, *dev_q.L_old = NULL, *dev_q.U_old = NULL, *dev_q.D_old = NULL; // // float *dev_l.R = NULL, *dev_l.L = NULL, *dev_l.U = NULL, *dev_l.D = NULL; // float *dev_l.R_old = NULL, *dev_l.L_old = NULL, *dev_l.U_old = NULL, *dev_l.D_old = NULL; // // float *dev_b.R = NULL, *dev_b.L = NULL, *dev_b.U = NULL, *dev_b.D = NULL; // float *dev_b.R_old = NULL, *dev_b.L_old = NULL, *dev_b.U_old = NULL, *dev_b.D_old = NULL; // // float *dev_pressao.p = NULL, *dev_pressao.p_old = NULL; // //- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - // __device__ char atualiza_mult_lagrange( int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b ){ int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; 
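	// Added note: the multiplier/flux arrays are flattened row-major with row
	// stride offset = blockDim.x * gridDim.x, which has to match tam_mat_real
	// for this indexing to be consistent. The four neighbours of the cell
	// handled by this thread are then reached below with -/+ offset
	// (up/down) and -/+ 1 (left/right).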
index_mem_uper = index_mem_central - offset; // (offset -1) = comprimento do kernel index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; dev_l.U[index_mem_central] = dev_b.U[index_mem_central] * (dev_q.U[index_mem_central] + dev_q.D_old[index_mem_uper]) + dev_l.D_old[index_mem_uper]; dev_l.D[index_mem_central] = dev_b.D[index_mem_central] * (dev_q.D[index_mem_central] + dev_q.U_old[index_mem_down]) + dev_l.U_old[index_mem_down]; dev_l.R[index_mem_central] = dev_b.R[index_mem_central] * (dev_q.R[index_mem_central] + dev_q.L_old[index_mem_right]) + dev_l.L_old[index_mem_right]; dev_l.L[index_mem_central] = dev_b.L[index_mem_central] * (dev_q.L[index_mem_central] + dev_q.R_old[index_mem_left]) + dev_l.R_old[index_mem_left]; return 0; } __device__ char calcula_pressao_velocidade( int tid, int uper, int right, int down, int left, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat ){ float auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0; int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; index_mem_uper = index_mem_central - offset; index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; if(uper == 1){ auxU = dev_mat.epsilon[index_mem_central] / (1 + dev_b.U[index_mem_central] * dev_mat.epsilon[index_mem_central]); DU = auxU * (dev_b.U[index_mem_central] * dev_q.D_old[index_mem_uper] + dev_l.D_old[index_mem_uper]); } if(right == 1){ auxR = dev_mat.epsilon[index_mem_central] / (1 + dev_b.R[index_mem_central] * dev_mat.epsilon[index_mem_central]); DR = auxR * (dev_b.R[index_mem_central] * dev_q.L_old[index_mem_right] + dev_l.L_old[index_mem_right]); } if(down == 1){ auxD = dev_mat.epsilon[index_mem_central] / (1 + dev_b.D[index_mem_central] * dev_mat.epsilon[index_mem_central]); DD = auxD * (dev_b.D[index_mem_central] * dev_q.U_old[index_mem_down] + dev_l.U_old[index_mem_down]); } if(left == 1){ auxL = dev_mat.epsilon[index_mem_central] / (1 + dev_b.L[index_mem_central] * dev_mat.epsilon[index_mem_central]); DL = auxL * (dev_b.L[index_mem_central] * dev_q.R_old[index_mem_left] + dev_l.R_old[index_mem_left]); } dev_pressao.p[index_mem_central] = (dev_mat.font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL); dev_q.L[index_mem_central] = auxL * dev_pressao.p[index_mem_central] - DL; dev_q.R[index_mem_central] = auxR * dev_pressao.p[index_mem_central] - DR; dev_q.U[index_mem_central] = auxU * dev_pressao.p[index_mem_central] - DU; dev_q.D[index_mem_central] = auxD * dev_pressao.p[index_mem_central] - DD; return 0; } __global__ void reduce1(float *g_idata, float *g_odata, int n) { __shared__ float sdata[64]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reducao(float *in){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; //efetuando a redução while(i != 0){ if(tid < i) in[tid] += in[tid + i]; if(i % 2 == 1){ if(i>1) in[0] += in[i-1]; } __syncthreads(); i /= 2; } } __global__ void reducao2(float *in_1, float *in_2){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; while(i != 0){ if(tid < i) in_1[tid] += in_1[tid + i]; in_2[tid] += in_2[tid + i]; if(i % 2 == 1){ if(i>1) in_1[0] += in_1[i-1]; in_2[0] += in_2[i-1]; } __syncthreads(); i /= 2; } } __global__ void escoamento_monofasico( ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat, float *dev_aux_1, const float erro_max, float dev_erro, float *dev_media){ /*int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; a[offset] = offset;*/ /*vificar as condições de contorno*/ int flag_thread_centrais = 1; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; /*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da região (tam_regiao = n + 2) */ int tid = x + y * blockDim.x * gridDim.x; //verificar esse deslocamento para n causar problema (somente na hora de armazenar utilizar o deslocamento) //int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid fornece o indice do vetor int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int eq_tid_cant_sup_esq = dimensao_x + 1; int eq_tid_cant_sup_dir = dimensao_x + (dimensao_x - 2); // posição extremo sup direito int eq_tid_cant_inf_dir = (dimensao_x * dimensao_y) - (dimensao_x + 2); // posição extremo inf direito int eq_tid_cant_inf_esq = ((dimensao_x) * (dimensao_y - 2)) + 1; // posição extremo inf esquerdo // int offset = (blockDim.x * gridDim.x) + 1 + 2; // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado int index_mem_central = tid; if(tid == eq_tid_cant_sup_esq){//canto superior esquerdo /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ /* * calcula_pressao_velocidade(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat * */ calcula_pressao_velocidade( tid, 0, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); /* * * atualiza_mult_lagrange(); * * param: int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b * */ atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_sup_dir){//canto superior direito /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } 
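		/* Added worked example of the corner/boundary index map computed above,
		 * for the launch in main() (dim3 block(2,2), dim3 thread(5,5), i.e. a
		 * 10x10 thread grid, so dimensao_x = dimensao_y = 10):
		 *     eq_tid_cant_sup_esq = 10 + 1          = 11   top-left interior cell
		 *     eq_tid_cant_sup_dir = 10 + (10 - 2)   = 18   top-right interior cell
		 *     eq_tid_cant_inf_dir = 100 - (10 + 2)  = 88   bottom-right interior cell
		 *     eq_tid_cant_inf_esq = 10*(10 - 2) + 1 = 81   bottom-left interior cell
		 * Threads whose tid lies on the outer ring (row 0, row 9, column 0 or
		 * column 9) satisfy none of the branches in this kernel, so the ghost
		 * cells are left untouched here. */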
if(tid == eq_tid_cant_inf_esq){//canto inferior esquerdo /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_inf_dir){//canto inferior direito /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_sup_dir)){//fronteira superior /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == dimensao_x - 2)){ //fronteira direita /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //fronteira inferior /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_x == 1)){//fronteira esquerda /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(flag_thread_centrais && (tid % dimensao_x >= 2) && (tid % dimensao_x <= (dimensao_x - 3)) && (tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_esq) ){ /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); } //dev_media[tid] = dev_pressao.p[tid]; /* dev_media[0] = reducao(dev_media, 100); dev_media[0] = dev_media[0] / (dimensao_x * dimensao_y); dev_pressao.p[index_mem_central] -= dev_media[0]; dev_l.D[index_mem_central] -= dev_media[0]; dev_l.U[index_mem_central] -= dev_media[0]; dev_l.L[index_mem_central] -= dev_media[0]; dev_l.R[index_mem_central] -= dev_media[0];*/ //avaliando criterio de convergencia /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] - dev_pressao.p_old[index_mem_central]; __syncthreads(); dev_aux_1[index_mem_central] = dev_aux_1[index_mem_central] * dev_aux_1[index_mem_central]; __syncthreads();*/ //redução da primeira soma sum1 /*dev_sum1 = reducao(dev_aux_1, 100);*/ //redução da segunda soma sum2 /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] * dev_pressao.p[index_mem_central]; __syncthreads(); dev_sum2 = reducao(dev_aux_1, 100); dev_erro = sqrt(dev_sum1 / dev_sum2);*/ //DUVIDA PARA COMO É O SINAL DO ERRO /*if (dev_erro > erro_max){ return; } dev_pressao.p_old[index_mem_central] = dev_pressao.p[index_mem_central]; dev_q.U_old[index_mem_central] = dev_q.U[index_mem_central]; dev_q.R_old[index_mem_central] = dev_q.R[index_mem_central]; dev_q.L_old[index_mem_central] = dev_q.L[index_mem_central]; dev_q.D_old[index_mem_central] = dev_q.D[index_mem_central]; dev_l.D_old[index_mem_central] = dev_l.D[index_mem_central]; dev_l.U_old[index_mem_central] = 
dev_l.U[index_mem_central]; dev_l.L_old[index_mem_central] = dev_l.L[index_mem_central]; dev_l.R_old[index_mem_central] = dev_l.R[index_mem_central]; i++; }*/ } __global__ void perapara_criterio_convergencia(float *dev_aux_1, float *dev_aux_2, float *dev_media, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_PRESSAO dev_pressao){ // sum1 = 0.; // sum2 = 0.; // for (k=1; k<=n; k++) // for (j=1; j<=n; j++) // { // aux = p[j][k] - p_old[j][k]; // sum1 += aux*aux; // sum2 += p[j][k]*p[j][k]; // } // erro = sqrt(sum1/sum2); //float dev_sum1 = 0.0, dev_sum2 = 0.0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; // dimensao_x -= 2; // dimensao_y -= 2; float media = (dev_media[0] + dev_media[1]) / ((dimensao_x-2) * (dimensao_y-2)); int canto_sup_dir = dimensao_x - 1; int canto_inf_dir = (dimensao_x * dimensao_y ) - 1; int canto_inf_esq = (dimensao_x * dimensao_y ) - (dimensao_x - 1); dev_pressao.p[tid] -= media; dev_l.D[tid] -= media; dev_l.U[tid] -= media; dev_l.L[tid] -= media; dev_l.R[tid] -= media; dev_aux_1[tid] = dev_pressao.p[tid] - dev_pressao.p_old[tid]; dev_aux_1[tid] = dev_aux_1[tid] * dev_aux_1[tid]; dev_aux_2[tid] = dev_pressao.p[tid] * dev_pressao.p[tid]; //despresando valores do contorno if((tid >= 0) && (tid <= canto_sup_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid <= canto_inf_esq) && (tid % dimensao_x == 0)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid < canto_inf_esq) && (tid % dimensao_x == (dimensao_x -1))){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid >= canto_inf_esq) && (tid <= canto_inf_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } /*Media zero nas pressoes e multiplicadores de lagrange*/ // dev_pressao.p_old[tid] = dev_pressao.p[tid]; // dev_q.U_old[tid] = dev_q.U[tid]; // dev_q.R_old[tid] = dev_q.R[tid]; // dev_q.L_old[tid] = dev_q.L[tid]; // dev_q.D_old[tid] = dev_q.D[tid]; // // dev_l.D_old[tid] = dev_l.D[tid]; // dev_l.U_old[tid] = dev_l.U[tid]; // dev_l.L_old[tid] = dev_l.L[tid]; // dev_l.R_old[tid] = dev_l.R[tid]; } __global__ void teste( ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l ){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; //dev_pressao.p_old[tid] = dev_pressao.p[tid];cuMemcpy dev_q.U_old[tid] = dev_q.U[tid]; dev_q.R_old[tid] = dev_q.R[tid]; dev_q.L_old[tid] = dev_q.L[tid]; dev_q.D_old[tid] = dev_q.D[tid]; dev_l.D_old[tid] = dev_l.D[tid]; dev_l.U_old[tid] = dev_l.U[tid]; dev_l.L_old[tid] = dev_l.L[tid]; dev_l.R_old[tid] = dev_l.R[tid]; } int main(void){ le_entrada(); inicializa_parametros(); cal_cond_robin(); parametro_independentes(); copia_dados_para_gpu(); // dim3 block(comprimento/16 , altura/16); // dim3 thread(16, 16); dim3 block(2, 2); dim3 thread(5, 5); /* * escoamento_monofasico(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO 
dev_pressao, ESTRUTURA_MAT dev_mat, float *dev_aux_1, const float erro_max * */ int i = 0, j = 0; while (i < 3){ escoamento_monofasico<<<block, thread>>>( dev_q, dev_l, dev_b, dev_pressao, dev_mat, dev_aux_1, 1e-5, dev_erro, dev_media); cudaDeviceSynchronize(); // HANDLE_ERROR( cudaMemcpy( dev_media, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), // cudaMemcpyDeviceToDevice ) ); // // cudaDeviceSynchronize(); reduce1<<<4, 64>>>( dev_pressao.p, dev_media, 100 );//verificar a melhor escolha para o kernel cudaDeviceSynchronize(); HANDLE_ERROR( cudaMemcpy( host_b.R, dev_media, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); cudaDeviceSynchronize(); int k; float soma = 0.0; for(k = 0; k < 4; k++) soma += host_b.R[k]; soma = soma / (8*8); float teste = (float)(host_b.R[0] + host_b.R[1]) / (tam_mat_interna * tam_mat_interna); HANDLE_ERROR( cudaMemcpy( host_b.R, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\ti = %d - matriz pressao sem a media\n", i); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%11.3E ", host_b.R[k*tam_mat_real + j]); printf("\n"); } perapara_criterio_convergencia<<<block, thread>>>( dev_aux_1, dev_aux_2, dev_media, dev_q, dev_l, dev_pressao); printf("\n\n\t\t\t\ti = %d - dentro: soma/64: %f\t teste/64: %f\n", i, soma, teste); printf("\n\n\t\t\t\tmatriz media\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[k*tam_mat_real + j]); printf("\n"); } HANDLE_ERROR( cudaMemcpy( host_b.R, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\tmatriz pressao com a media\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%11.3E ", host_b.R[k*tam_mat_real + j]); printf("\n"); } //reducao2<<<block, thread>>>( dev_aux_1, dev_aux_2); // reducao<<<block, thread>>>( dev_aux_1 ); // // cudaDeviceSynchronize(); // // reducao<<<block, thread>>>( dev_aux_2 ); // // // cudaDeviceSynchronize(); //teste<<<block, thread>>>( dev_pressao, dev_q, dev_l ); HANDLE_ERROR( cudaMemcpy( dev_pressao.p_old, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.U_old, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.R_old, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.D_old, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.L_old, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.U_old, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.R_old, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.D_old, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.L_old, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToDevice ) ); //cudaDeviceSynchronize(); HANDLE_ERROR( cudaMemset( dev_media, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMemset( dev_aux_1, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMemset( dev_aux_2, 0.0, tam_mat_real * tam_mat_real * sizeof(float) 
) ); cudaDeviceSynchronize(); i++; } copia_dados_para_cpu(); printf("\ntam_mat_interna = %d\n", tam_mat_interna); printf("tam_mat_real = %d\n", tam_mat_real); printf("max_interacoes = %d\n", max_interacoes); printf("op_contorno = %d\n", op_contorno); printf("tam_regiao = %f\n", tam_regiao); printf("erro_max = %f\n", erro_max); printf("valor_contor = %f\n", valor_contor); /* printf("\n\n\t\t\tmat_font:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_mat.font[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_perm:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]); //printf("%12.4E ", host_mat.perm[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_epsilon:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_mat.epsilon[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\tbeta U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D[i*tam_mat_real + j]); printf("\n"); }*/ printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]); printf("\n"); } printf("\npressao old:\n"); printf("\n\n\t\t\t\tpressao old:\n"); for(i = 0; i < tam_mat_real; i ++){ 
for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]); printf("\n"); } /*printf("\n\n\t\t\t\tb_U:\t\t\t\t\t\t\t\t\tb_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_R:\t\t\t\t\t\t\t\t\tb_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\t\t\t\t\t\t\t\t\tpressao_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tl_U:\t\t\t\t\t\t\t\t\tl_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\t\t\t\t\t\t\t\t\tl_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\t\t\t\t\t\t\t\t\tl_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\t\t\t\t\t\t\t\t\tl_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\t\t\t\t\t\t\t\t\tq_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U_old[i*tam_mat_real + j]); printf("\n"); } 
printf("\n\n\t\t\t\tq_R:\t\t\t\t\t\t\t\t\tq_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\t\t\t\t\t\t\t\t\tq_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\t\t\t\t\t\t\t\t\tq_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L_old[i*tam_mat_real + j]); printf("\n"); }*/ clear_mem(); // // system("pause"); return 0; } char le_entrada(){ printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n"); FILE *arq = NULL; //arq = fopen("../dir_entrada/parametro_entrada.txt", "r"); arq = fopen("parametro_entrada.txt", "r"); if(arq == NULL){ printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n"); exit(1); } else{ printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n"); /*char c[2], dados[255], buffer[255];*/ char buffer[255]; int cont = 1; while(cont < 9){ fscanf(arq, "%s", buffer); //puts(buffer); int i = 0, j = 0; switch(strlen(buffer)){ case 8: //erro_maximo fscanf(arq, "%f", &erro_max); break; case 10: //tam_regiao fscanf(arq, "%f", &tam_regiao); break; case 11: //opcao_contorno fscanf(arq, "%d", &op_contorno); break; case 12: //valor_contor fscanf(arq, "%f", &valor_contor); break; case 14: //max_interacoes fscanf(arq, "%d", &max_interacoes); break; case 15: //tam_mat_interna fscanf(arq, "%d", &tam_mat_interna); break; case 16: //matriz_de_fontes //uso (tam_mat_interna + 2) - pois ainda não inicializei 'tam_mat_real' host_mat.font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.font[i*(tam_mat_interna+2) + j]); break; case 18: //matriz_permeabilidade host_mat.perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); host_mat.epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.perm[i*(tam_mat_interna+2) + j]); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) host_mat.perm[i*(tam_mat_interna+2) + j] = PamM*exp(S * host_mat.perm[i*(tam_mat_interna+2) + j]); break; default: printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n"); return 0; } //int tam = strlen(buffer); cont++; } printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n"); } printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n"); return 1; } float *aloca_matriz(int L, int C){ float *aux = NULL; aux = (float *) calloc(L * C, sizeof(float)); if(aux == NULL){ printf("\n\n\t\tErro ao alocar memoria\n\n"); exit(1); }else{ return (aux); } return NULL; } /* * *VERIFICAR RETORNO * */ void cal_cond_robin(){ float keff = 0.0, numerador = 0.0, denominador = 0.0; float C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0 //Canto superior esquerdo numerador = ( 2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[tam_mat_real + 2] ); denominador = ( host_mat.perm[tam_mat_real + 1] + 
host_mat.perm[tam_mat_real + 2] ); keff = numerador / denominador; host_b.R[tam_mat_real + 1] = C*h/keff; numerador = (2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[(2*tam_mat_real) + 1]); denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[(2*tam_mat_real) + 1]); keff = numerador / denominador; host_b.D[tam_mat_real + 1] = C*h/keff; //Canto superior direito numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[tam_mat_real + tam_mat_interna] = C*h/keff; //Canto infeior esquerdo numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); keff = numerador / denominador; host_b.R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; //Canto infeior direito numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; //Calculo das fronteiras e região interna para betas int i = 0; for(i = 2; i < tam_mat_interna; i ++){ //Calcula fronteira superior numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i-1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i-1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i+1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i+1)] ); keff = numerador / denominador; host_b.R[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[(2 * tam_mat_real) + i] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[(2 * tam_mat_real) + i] ); keff = numerador / denominador; host_b.D[tam_mat_real + i] = C*h/keff; //Calcula fronteira esquerda numerador 
= ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i - 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i - 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[(i * tam_mat_real) + 2] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[(i * tam_mat_real) + 2] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i + 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i + 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + 1] = C*h/keff; //Calcula fronteira inferior numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); keff = numerador / denominador; host_b.U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); keff = numerador / denominador; host_b.R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; //Calcula fronteira direita numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; //Calcula dados internos int j = 0; for(j = 2; j < tam_mat_interna; j ++){ numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j + 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * 
tam_mat_real) + (j + 1)] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i - 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i - 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i + 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i + 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + j] = C*h/keff; } } } /* * *VERIFICAR RETORNO * */ char parametro_independentes(){ int i = 0, j = 0; float constante = 2/h; for(i = 0; i < tam_mat_real; i ++) for(j = 0; j < tam_mat_real; j++){ host_mat.epsilon[i*tam_mat_real + j] = constante * host_mat.perm[i*tam_mat_real + j]; host_mat.font[i*tam_mat_real + j] *= h; } return 0; } char copia_dados_para_gpu(){ HANDLE_ERROR( cudaMemcpy( dev_q.R, host_q.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.L, host_q.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.U, host_q.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.D, host_q.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.R_old, host_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.L_old, host_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.U_old, host_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.D_old, host_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.R, host_l.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.L, host_l.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.U, host_l.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.D, host_l.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.R_old, host_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.L_old, host_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.U_old, host_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.D_old, host_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.R, host_b.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.L, host_b.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.U, host_b.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.D, host_b.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.R_old, host_b.R_old, tam_mat_real * tam_mat_real * sizeof(float), 
cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.L_old, host_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.U_old, host_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.D_old, host_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_pressao.p, host_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_pressao.p_old, host_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_mat.perm, host_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_mat.epsilon, host_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_mat.font, host_mat.font, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); return 0; } void copia_dados_para_cpu(){ HANDLE_ERROR( cudaMemcpy( host_q.R, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.L, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.U, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.D, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.R_old, dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.L_old, dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.U_old, dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.D_old, dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.R, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.L, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.U, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.D, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.R_old, dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.L_old, dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.U_old, dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.D_old, dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.R, dev_b.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.L, dev_b.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.U, dev_b.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.D, dev_b.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( 
host_b.R_old, dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.L_old, dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.U_old, dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.D_old, dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_pressao.p, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_pressao.p_old, dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_mat.font, dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_mat.perm, dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_mat.epsilon, dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); } char inicializa_parametros(){ printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n"); /* * * * CONTRUIR FUNCAO PARA VERIFICAR ERRO DE ALOCAÇÃO * VERIFICAR RETORNO */ tam_mat_real = tam_mat_interna + 2; h = tam_regiao / tam_mat_interna; HANDLE_ERROR( cudaMalloc( (void**)&dev_q, sizeof(ESTRUTURA_Q) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_l, sizeof(ESTRUTURA_L) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b, sizeof(ESTRUTURA_B) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao, sizeof(ESTRUTURA_PRESSAO) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat, sizeof(ESTRUTURA_MAT) ) ); host_q.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U != NULL) HANDLE_ERROR( cudaMalloc( 
(void**)&dev_l.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_aux_1, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_aux_2, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMemset( dev_aux_1, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMemset( dev_aux_2, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&erro_max, sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_erro, sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( 
(void**)&dev_media, tam_mat_real * tam_mat_real * sizeof(float)) ); host_aux_1 = aloca_matriz(tam_mat_real, tam_mat_real); int i = 0; switch(op_contorno){ case 1: //Inicializa contorno superior for(i = 0; i < tam_mat_real; i++){ host_q.D[i] = valor_contor; host_q.D_old[i] = valor_contor; } break; case 2://Inicializa contorno esquerdo for(i = 0; i < tam_mat_real; i++){ host_q.R[i*tam_mat_real] = valor_contor; host_q.R_old[i*tam_mat_real] = valor_contor; } break; case 3://Inicializa contorno direito for(i = 0; i < tam_mat_real; i++){ host_q.L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor; host_q.L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor; } break; case 4://Inicializa contorno inferior for(i = 0; i < tam_mat_real; i++){ host_q.L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor; host_q.L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor; } break; default: printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n"); break; } printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n"); return 1; } void clear_mem(){ HANDLE_ERROR( cudaFree (dev_q.U)); HANDLE_ERROR( cudaFree (dev_q.R)); HANDLE_ERROR( cudaFree (dev_q.D)); HANDLE_ERROR( cudaFree (dev_q.L)); free(host_q.U); free(host_q.R); free(host_q.D); free(host_q.L); HANDLE_ERROR( cudaFree (dev_l.U)); HANDLE_ERROR( cudaFree (dev_l.R)); HANDLE_ERROR( cudaFree (dev_l.D)); HANDLE_ERROR( cudaFree (dev_l.L)); free(host_l.U); free(host_l.R); free(host_l.D); free(host_l.L); HANDLE_ERROR( cudaFree (dev_b.U)); HANDLE_ERROR( cudaFree (dev_b.R)); HANDLE_ERROR( cudaFree (dev_b.D)); HANDLE_ERROR( cudaFree (dev_b.L)); free(host_b.U); free(host_b.R); free(host_b.D); free(host_b.L); HANDLE_ERROR( cudaFree (dev_pressao.p)); HANDLE_ERROR( cudaFree (dev_pressao.p_old)); free(host_pressao.p); free(host_pressao.p_old); HANDLE_ERROR( cudaFree (dev_mat.perm)); HANDLE_ERROR( cudaFree (dev_mat.font)); HANDLE_ERROR( cudaFree (dev_mat.epsilon)); free(host_mat.perm); free(host_mat.font); free(host_mat.epsilon); }
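Every beta coefficient assembled earlier in this file follows the same rule: the effective permeability between a cell and one of its neighbours is the harmonic mean keff = 2*k1*k2/(k1+k2), and the stored coefficient is C*h/keff. Below is a minimal host-side sketch of that rule factored into a helper; the names beta_coeff, C_COEF and H_STEP are illustrative placeholders, not part of the original solver.

#include <stdio.h>

/* Illustrative constants; in the solver, C is a model constant and h = tam_regiao/tam_mat_interna. */
static const float C_COEF = 1.0f;
static const float H_STEP = 0.1f;

/* Effective inter-cell coefficient: harmonic mean of the two permeabilities,
   then beta = C*h/keff, the expression repeated above for the R/L/U/D neighbours. */
float beta_coeff(float k_cell, float k_neighbour)
{
    float keff = (2.0f * k_cell * k_neighbour) / (k_cell + k_neighbour);
    return C_COEF * H_STEP / keff;
}

int main(void)
{
    /* Two sample permeabilities for one cell/neighbour pair: keff = 2.666..., beta = C*h/keff. */
    printf("beta = %f\n", beta_coeff(2.0f, 4.0f));
    return 0;
}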
b6c47fe2ed94696b96aa2eeb5bf6672b95897f05.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2018-present Antonio Mallia <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "benchmark/benchmark.h" #include "synthetic.hpp" #include "gpu_ic/cuda_bp.cuh" #include "gpu_ic/utils/utils.hpp" #include "gpu_ic/utils/cuda_utils.hpp" template<typename Generator, size_t block_size> class ValuesFixture : public ::benchmark::Fixture { public: using ::benchmark::Fixture::SetUp; using ::benchmark::Fixture::TearDown; virtual void SetUp(::benchmark::State& st) { using namespace gpu_ic; Generator clu(1); values = clu.generate(st.range(0), 1U << 29); utils::delta_encode(values.data(), values.size()); encoded_values.resize(values.size() * 8); auto compressedsize = cuda_bp::encode<block_size>(encoded_values.data(), values.data(), values.size()); encoded_values.resize(compressedsize); encoded_values.shrink_to_fit(); decoded_values.resize(values.size()); CUDA_CHECK_ERROR(hipSetDevice(0)); hipLaunchKernelGGL(( warmUpGPU), dim3(1), dim3(1), 0, 0, ); CUDA_CHECK_ERROR(hipMalloc((void **)&d_encoded, encoded_values.size() * sizeof(uint8_t))); CUDA_CHECK_ERROR(hipMemcpy(d_encoded, encoded_values.data(), encoded_values.size() * sizeof(uint8_t), hipMemcpyHostToDevice)); CUDA_CHECK_ERROR(hipMalloc((void **)&d_decoded, values.size() * sizeof(uint32_t))); CUDA_CHECK_ERROR(hipDeviceSynchronize()); } virtual void TearDown(::benchmark::State&) { CUDA_CHECK_ERROR(hipMemcpy(decoded_values.data(), d_decoded, values.size() * sizeof(uint32_t), hipMemcpyDeviceToHost)); ASSERT_EQ(decoded_values.size(), values.size()); for (size_t i = 0; i < values.size(); ++i) { ASSERT_EQ(decoded_values[i], values[i]); } hipFree(d_encoded); hipFree(d_decoded); values.clear(); encoded_values.clear(); decoded_values.clear(); } std::vector<uint32_t> values; std::vector<uint8_t> encoded_values; std::vector<uint32_t> decoded_values; uint8_t * d_encoded; uint32_t * d_decoded; }; BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeUniform128, gpu_ic::UniformDataGenerator, 128)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<128>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(hipDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeUniform128)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeUniform256, gpu_ic::UniformDataGenerator, 256)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<256>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(hipDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeUniform256)->RangeMultiplier(2)->Range((1ULL << 
15), (1ULL<<25)); BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeClustered128, gpu_ic::ClusteredDataGenerator, 128)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<128>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(hipDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeClustered128)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeClustered256, gpu_ic::ClusteredDataGenerator, 256)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<256>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(hipDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeClustered256)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_MAIN();
b6c47fe2ed94696b96aa2eeb5bf6672b95897f05.cu
/** * Copyright 2018-present Antonio Mallia <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda.h> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "benchmark/benchmark.h" #include "synthetic.hpp" #include "gpu_ic/cuda_bp.cuh" #include "gpu_ic/utils/utils.hpp" #include "gpu_ic/utils/cuda_utils.hpp" template<typename Generator, size_t block_size> class ValuesFixture : public ::benchmark::Fixture { public: using ::benchmark::Fixture::SetUp; using ::benchmark::Fixture::TearDown; virtual void SetUp(::benchmark::State& st) { using namespace gpu_ic; Generator clu(1); values = clu.generate(st.range(0), 1U << 29); utils::delta_encode(values.data(), values.size()); encoded_values.resize(values.size() * 8); auto compressedsize = cuda_bp::encode<block_size>(encoded_values.data(), values.data(), values.size()); encoded_values.resize(compressedsize); encoded_values.shrink_to_fit(); decoded_values.resize(values.size()); CUDA_CHECK_ERROR(cudaSetDevice(0)); warmUpGPU<<<1, 1>>>(); CUDA_CHECK_ERROR(cudaMalloc((void **)&d_encoded, encoded_values.size() * sizeof(uint8_t))); CUDA_CHECK_ERROR(cudaMemcpy(d_encoded, encoded_values.data(), encoded_values.size() * sizeof(uint8_t), cudaMemcpyHostToDevice)); CUDA_CHECK_ERROR(cudaMalloc((void **)&d_decoded, values.size() * sizeof(uint32_t))); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); } virtual void TearDown(::benchmark::State&) { CUDA_CHECK_ERROR(cudaMemcpy(decoded_values.data(), d_decoded, values.size() * sizeof(uint32_t), cudaMemcpyDeviceToHost)); ASSERT_EQ(decoded_values.size(), values.size()); for (size_t i = 0; i < values.size(); ++i) { ASSERT_EQ(decoded_values[i], values[i]); } cudaFree(d_encoded); cudaFree(d_decoded); values.clear(); encoded_values.clear(); decoded_values.clear(); } std::vector<uint32_t> values; std::vector<uint8_t> encoded_values; std::vector<uint32_t> decoded_values; uint8_t * d_encoded; uint32_t * d_decoded; }; BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeUniform128, gpu_ic::UniformDataGenerator, 128)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<128>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeUniform128)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeUniform256, gpu_ic::UniformDataGenerator, 256)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<256>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeUniform256)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeClustered128, 
gpu_ic::ClusteredDataGenerator, 128)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<128>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeClustered128)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_TEMPLATE_DEFINE_F(ValuesFixture, decodeClustered256, gpu_ic::ClusteredDataGenerator, 256)(benchmark::State& state) { while (state.KeepRunning()) { cuda_bp::decode<256>(d_decoded, d_encoded, decoded_values.size()); CUDA_CHECK_ERROR(cudaDeviceSynchronize()); } auto bpi = double(8*encoded_values.size())/decoded_values.size(); state.counters["bpi"] = benchmark::Counter(bpi, benchmark::Counter::kAvgThreads); } BENCHMARK_REGISTER_F(ValuesFixture, decodeClustered256)->RangeMultiplier(2)->Range((1ULL << 15), (1ULL<<25)); BENCHMARK_MAIN();
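Both benchmark fixtures call utils::delta_encode on the generated postings before compressing them. The gpu_ic implementation is not reproduced here; the sketch below only illustrates the convention such a helper is assumed to follow (store first-order differences of a non-decreasing sequence, recover the values with a prefix sum), so treat it as an assumption rather than the library's actual code.

#include <cassert>
#include <cstdint>
#include <vector>

// Assumed convention: in-place first-order differences of a non-decreasing sequence.
void delta_encode(std::vector<uint32_t> &v) {
    for (size_t i = v.size(); i-- > 1; ) v[i] -= v[i - 1];
}

// Inverse: a prefix sum restores the original values.
void delta_decode(std::vector<uint32_t> &v) {
    for (size_t i = 1; i < v.size(); ++i) v[i] += v[i - 1];
}

int main() {
    std::vector<uint32_t> docs{3, 7, 8, 15, 42};
    std::vector<uint32_t> original = docs;
    delta_encode(docs);   // {3, 4, 1, 7, 27} -- small gaps compress well
    delta_decode(docs);   // back to {3, 7, 8, 15, 42}
    assert(docs == original);
    return 0;
}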
7d5e0f9e32ddabdc19885e529cf4a77df55f088a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Image Blurring // // In this kernel we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays : // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int r = blockDim.y * blockIdx.y + threadIdx.y; int c = blockDim.x * blockIdx.x + threadIdx.x; if(r >= numRows || c >= numCols) return; //Copy filter to shared memory //const int filter_size = filterWidth*filterWidth; extern __shared__ float sh_filter[]; if(threadIdx.x < filterWidth && threadIdx.y < filterWidth){ sh_filter[threadIdx.x * filterWidth + threadIdx.y] = filter[threadIdx.x * filterWidth + threadIdx.y]; } __syncthreads(); const int halfWidth = filterWidth/2; float result = 0.0f; //row and col after clamping int image_r = 0, image_c = 0; for(int filter_r = -halfWidth; filter_r <= halfWidth; filter_r++){ for(int filter_c = -halfWidth; filter_c <= halfWidth; filter_c++){ //clamp image boundary image_r = min(max(r + filter_r, 0), numRows - 1); image_c = min(max(c + filter_c, 0), numCols - 1); const float filter_value = sh_filter[(filter_r + halfWidth) * filterWidth + filter_c + halfWidth]; const float image_value = inputChannel[image_r * numCols + image_c]; result += filter_value * image_value; } } outputChannel[r * numCols + c] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { int r = blockDim.y * blockIdx.y + threadIdx.y; int c = blockDim.x * blockIdx.x + threadIdx.x; if(r >= numRows || c >= numCols) return; const int indx = r * numCols + c; uchar4 cell = inputImageRGBA[indx]; redChannel[indx] = cell.x; greenChannel[indx] = cell.y; blueChannel[indx] = cell.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const dim3 blockSize(32, 32); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize((numCols + blockSize.y - 1) / blockSize.y, (numRows + blockSize.x - 1) / blockSize.x); //Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //Call your convolution kernel here 3 times, once for each color channel. int shared_size = filterWidth * filterWidth * sizeof(float); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_size, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_size, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_size, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
7d5e0f9e32ddabdc19885e529cf4a77df55f088a.cu
// Image Blurring // // In this kernel we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays : // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. 
Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. int r = blockDim.y * blockIdx.y + threadIdx.y; int c = blockDim.x * blockIdx.x + threadIdx.x; if(r >= numRows || c >= numCols) return; //Copy filter to shared memory //const int filter_size = filterWidth*filterWidth; extern __shared__ float sh_filter[]; if(threadIdx.x < filterWidth && threadIdx.y < filterWidth){ sh_filter[threadIdx.x * filterWidth + threadIdx.y] = filter[threadIdx.x * filterWidth + threadIdx.y]; } __syncthreads(); const int halfWidth = filterWidth/2; float result = 0.0f; //row and col after clamping int image_r = 0, image_c = 0; for(int filter_r = -halfWidth; filter_r <= halfWidth; filter_r++){ for(int filter_c = -halfWidth; filter_c <= halfWidth; filter_c++){ //clamp image boundary image_r = min(max(r + filter_r, 0), numRows - 1); image_c = min(max(c + filter_c, 0), numCols - 1); const float filter_value = sh_filter[(filter_r + halfWidth) * filterWidth + filter_c + halfWidth]; const float image_value = inputChannel[image_r * numCols + image_c]; result += filter_value * image_value; } } outputChannel[r * numCols + c] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { int r = blockDim.y * blockIdx.y + threadIdx.y; int c = blockDim.x * blockIdx.x + threadIdx.x; if(r >= numRows || c >= numCols) return; const int indx = r * numCols + c; uchar4 cell = inputImageRGBA[indx]; redChannel[indx] = cell.x; greenChannel[indx] = cell.y; blueChannel[indx] = cell.z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const dim3 blockSize(32, 32); //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize((numCols + blockSize.y - 1) / blockSize.y, (numRows + blockSize.x - 1) / blockSize.x); //Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //Call your convolution kernel here 3 times, once for each color channel. int shared_size = filterWidth * filterWidth * sizeof(float); gaussian_blur<<<gridSize, blockSize, shared_size>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize, shared_size>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize, shared_size>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. 
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
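A convenient way to validate the gaussian_blur kernel above is a plain CPU implementation of the same clamp-to-edge weighted average. The function below is a minimal reference sketch; the name reference_blur and the calling convention are illustrative, not part of the assignment files.

#include <algorithm>

// CPU reference for one channel: the same clamp-to-edge weighted average as the kernel.
void reference_blur(const unsigned char *in, unsigned char *out,
                    int numRows, int numCols,
                    const float *filter, int filterWidth)
{
    const int half = filterWidth / 2;
    for (int r = 0; r < numRows; ++r) {
        for (int c = 0; c < numCols; ++c) {
            float result = 0.0f;
            for (int fr = -half; fr <= half; ++fr) {
                for (int fc = -half; fc <= half; ++fc) {
                    int ir = std::min(std::max(r + fr, 0), numRows - 1); // clamp row index
                    int ic = std::min(std::max(c + fc, 0), numCols - 1); // clamp column index
                    float w = filter[(fr + half) * filterWidth + (fc + half)];
                    result += w * in[ir * numCols + ic];
                }
            }
            out[r * numCols + c] = static_cast<unsigned char>(result);
        }
    }
}

Copy one blurred channel back to the host and compare it element by element against this reference, allowing a difference of one per pixel for float-to-uchar rounding.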
c40855ff28ebe8489d55c90494296435e0269191.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> // Lattice dimensions and thread_num #define thread_num 512// Must be 2^k #define grid_dim_x 128 #define grid_dim_z 16384 #define grid_size 2097152 #define iter 250 #define iterbal 50 void calc_cpu(float B, float kT, float QX, float QZ, float *E_avg, float *M_avg, float *E_var, float *M_var); __global__ void set_lattice(bool *lattice); __global__ void iterate_grid(float B, float kT, float Q, float QZ, bool round, bool *dev_lattice, float *d_E_vec, float *d_M_vec, int seed); __global__ void reset_vec(float *vec); __global__ void vec_sum(float *vec, float *result); __global__ void set_val(float *variable, float value); __global__ void add_val(float *variable, float *addition); __device__ int posMod(int number, int modulus); __device__ int indexMap(int xi, int yi, int zi); // Values of a sample are read into Accumulator, which keeps track of the mean and // variance of that sample. class Accumulator { private: int N; float m; float s ; // Data invariant: // N is the number of values in the sample that has been read into an Accumulator instance, N >= 0 // m is the mean of the values in the sample that has been read into an Accumulator instance // s is the sum of squared deviations (each value's deviation from the mean, squared) over the // sample that has been read into an Accumulator instance, s >= 0 public: // U: Accumulator a; // B: Nothing // A: a is a new Accumulator instance into which no values have been read. // All data in a has been reset, i.e. a.N = 0, a.m = 0.0 and a.s = 0.0 Accumulator() { N = 0; m = 0.0; s = 0.0; } // U: a.addDataValue(x) // B: Nothing // A: x has been added to the sample a void addDataValue(float x) { N++; s = s + 1.0*(N-1)/N*(x-m)*(x-m); m = m + (x-m)/N; } // U: x = a.mean() // B: Nothing // A: x contains the mean of the values in the sample a float mean() { return m; } // U: x = a.var() // B: N > 1 // A: x contains the variance of the values in the sample a float var() { return s/(N-1); } // U: x = a.stddev() // B: N > 1 // A: x contains the standard deviation of the values in the sample a float stddev ( ) { return sqrt(s/(N-1)); } }; int main(){ // Minimum and maximum values of B, and number of steps. // If Bsteps = 1, then only Bmin is used. float B; float Bmin = 0.0; float Bmax = 1.0; int Bsteps = 1; // Minimum and maximum values of kT, and number of steps. // If kTsteps = 1, then only kTmin is used. float kT; float kTmin = 0.5; float kTmax = 7.0; int kTsteps = 1; // Minimum and maximum values of QY, and number of steps. // If QYsteps = 1, then only QYmin is used. float QY; float QYmin = -1.0; float QYmax = 1.0; int QYsteps = 1; // Minimum and maximum values of QZ, and number of steps. // If QZsteps = 1, then only QZmin is used.
float QZ; float QZmin = -1.0; float QZmax = 1.0; int QZsteps = 1; srand(time(NULL)); // Seed CPU RNG float Emean; float Mmean; float Evar; float Mvar; char filename[20]; sprintf(filename, "results.dat"); FILE *fp; fp = fopen(filename, "w"); for (int i=0;i<Bsteps;i++){ // B loop if (Bsteps>1){ B = Bmin + i*(Bmax-Bmin)/(Bsteps-1); } else{ B = Bmin; } for (int l=0; l<QZsteps; l++){ // QZ loop if (QZsteps>1){ QZ = QZmin + l*(QZmax-QZmin)/(QZsteps-1); } else{ QZ = QZmin; } for(int k=0; k<QYsteps; k++){ // QY loop if (QYsteps>1){ QY = QYmin + k*(QYmax-QYmin)/(QYsteps-1); } else{ QY = QYmin; } for(int j=0; j<kTsteps; j++){ // kT loop if (kTsteps>1){ kT = kTmin + j*(kTmax-kTmin)/(kTsteps-1); } else{ kT = kTmin; } printf("Performing calculation at B=%g, kT=%g, QY=%g, QZ=%g\n", B, kT, QY, QZ); calc_cpu(B, kT, QY, QZ, &Emean, &Mmean, &Evar, &Mvar); fprintf(fp, "%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\n", B, kT, QY, QZ, Emean, Mmean, Evar, Mvar); } // kT loop end } // QY loop end } // QZ loop end } // B loop end fclose(fp); } // U: calc_cpu(...) // B: kT > 0, n => 0 // A: The results of an ising simulation at magnetic field B and // temperature kT have been stored in Earr[n] (mean energy) // and Marr[n] (mean magnetization) void calc_cpu(float B, float kT, float QX, float QZ, float *E_avg_out, float *M_avg_out, float *E_var_out, float *M_var_out){ // Degbug things // Template: // hipMemcpy( &buggy, dev_value, sizeof(float), hipMemcpyDeviceToHost); // printf("%g\n",buggy); /*float buggy;*/ /*float buggyvec[thread_num];*/ // Create, allocate memory for and set lattice bool *dev_lattice; hipMalloc( (void**)&dev_lattice, grid_size*sizeof(bool) ); hipLaunchKernelGGL(( set_lattice), dim3(1), dim3(thread_num), 0, 0, dev_lattice); float *dev_dEvec; float *dev_dMvec; hipMalloc( (void**)&dev_dEvec, thread_num*sizeof(float) ); hipMalloc( (void**)&dev_dMvec, thread_num*sizeof(float) ); float *dev_Etot; float *dev_Mtot; /*float *dev_Eavg;*/ /*float *dev_Mavg;*/ hipMalloc( (void**)&dev_Etot, sizeof(float) ); hipMalloc( (void**)&dev_Mtot, sizeof(float) ); /*hipMalloc( (void**)&dev_Eavg, sizeof(float) );*/ /*hipMalloc( (void**)&dev_Mavg, sizeof(float) );*/ hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, dev_Etot, grid_size*(-2.0-2.0*QX-2.0*QZ-B)); hipLaunchKernelGGL(( set_val), dim3(1),dim3(1), 0, 0, dev_Mtot, grid_size); /*set_val<<<1,1>>>(dev_Eavg, 0.0);*/ /*set_val<<<1,1>>>(dev_Mavg, 0.0);*/ Accumulator energy; Accumulator magnet; float Etot; float Mtot; for (int j=0; j<iter; j++){ hipLaunchKernelGGL(( reset_vec), dim3(1), dim3(thread_num), 0, 0, dev_dEvec); hipLaunchKernelGGL(( reset_vec), dim3(1), dim3(thread_num), 0, 0, dev_dMvec); hipLaunchKernelGGL(( iterate_grid), dim3(1), dim3(thread_num), 0, 0, B, kT, QX, QZ, 0, dev_lattice, dev_dEvec, dev_dMvec, rand() ); hipLaunchKernelGGL(( iterate_grid), dim3(1), dim3(thread_num), 0, 0, B, kT, QX, QZ, 1, dev_lattice, dev_dEvec, dev_dMvec, rand() ); hipLaunchKernelGGL(( vec_sum), dim3(1), dim3(thread_num), 0, 0, dev_dEvec, dev_Etot); hipLaunchKernelGGL(( vec_sum), dim3(1), dim3(thread_num), 0, 0, dev_dMvec, dev_Mtot); if (j>iterbal){ hipMemcpy( &Etot, dev_Etot, sizeof(float), hipMemcpyDeviceToHost); hipMemcpy( &Mtot, dev_Mtot, sizeof(float), hipMemcpyDeviceToHost); Etot = Etot/grid_size; Mtot = Mtot/grid_size; energy.addDataValue(Etot); magnet.addDataValue(Mtot); } } *E_avg_out = energy.mean(); *M_avg_out = magnet.mean(); *E_var_out = energy.var(); *M_var_out = magnet.var(); hipFree(dev_lattice); hipFree(dev_dEvec); hipFree(dev_dMvec); hipFree(dev_Etot); 
hipFree(dev_Mtot); } // U:hipLaunchKernelGGL(( set_lattice), dim3(1), dim3(thread_num), 0, 0, dev_lattice); // B: dev_lattice points to allocated device memory for grid_size bool numbers // A: all elements of dev_lattice are set to 1 __global__ void set_lattice(bool *lattice){ int tid = threadIdx.x; for (int i=tid;i<grid_size;i+=thread_num){ lattice[i] = 1; } } // U:hipLaunchKernelGGL(( iterate_grid), dim3(1), dim3(thread_num), 0, 0, ...) // B: // A: One ising iteration has been performed over a checkerboard. If round=0 it's over the white squares, if round=1 it's over // the black squares. The change done by each thread has been added to d_E_vec[tid] and d_M_vec[tid] __global__ void iterate_grid(float B, float kT, float QX, float QZ, bool round, bool *dev_lattice, float *d_E_vec, float *d_M_vec, int seed){ int tid=threadIdx.x; hiprandState_t state; hiprand_init(seed+tid, 0, 0, &state); int si; float ssum; float delta_E; float delta_M; float p; float r; int xi; int yi; int zi; for (int i=round+2*tid;i<grid_size;i+=2*thread_num){ zi = i/grid_dim_z; if ((zi%2)==0){ yi = (i%grid_dim_z)/grid_dim_x; } else{ yi = grid_dim_x-(i%grid_dim_z)/grid_dim_x-1; } if ((yi+zi)%2 == 0){ xi = i%grid_dim_x; } else{ xi = grid_dim_x - i%grid_dim_x - 1; } si = 2*dev_lattice[i]-1; ssum = 2*dev_lattice[indexMap(xi-1,yi,zi)] +2*dev_lattice[indexMap(xi+1,yi,zi)] -2 +QX*2*dev_lattice[indexMap(xi,yi-1,zi)] +QX*2*dev_lattice[indexMap(xi,yi+1,zi)] -QX*2 +QZ*2*dev_lattice[indexMap(xi,yi,zi-1)] +QZ*2*dev_lattice[indexMap(xi,yi,zi+1)] -QZ*2; delta_E = 2*si*(ssum+B); delta_M = -2*si; if (delta_E < 0){ p = 1; } else{ p = exp(-delta_E/kT); } r = hiprand_uniform(&state); if (r<p){ // Spin flip! d_E_vec[tid] += delta_E; d_M_vec[tid] += delta_M; dev_lattice[i] = !( dev_lattice[i] ); } } } // U:hipLaunchKernelGGL(( reset_vec), dim3(1), dim3(thread_num), 0, 0, dev_vec) // B: dev_vec has been allocated device memory for thread_num float numbers // A: All elements of dev_vec have been set as 0.0 __global__ void reset_vec(float *vec){ vec[threadIdx.x] = 0.0; } // U:hipLaunchKernelGGL(( vec_sum), dim3(1), dim3(thread_num), 0, 0, dev_vec, dev_result) // B: dev_vec has length thread_num // A: The sum of elements in dev_vec has been added to result __global__ void vec_sum(float *vec, float *result){ // Right multithread version (has to use threads) int tid = threadIdx.x; int offset = thread_num>>1; while (offset>0){ if (tid < offset){ vec[tid] += vec[tid+offset]; } __syncthreads(); offset=offset>>1; } if (tid==0){ *result += vec[0]; } // Right single thread version /*int tid = threadIdx.x;*/ /*if (tid == 0){*/ /*for (int i=1;i<thread_num;i++){*/ /*vec[0] += vec[i];*/ /*}*/ /**result += vec[0];*/ /*}*/ } // U: set_val<<<1, 1>>>(variable, value) // B: // A: *variable = value __global__ void set_val(float *variable, float value){ *variable = value; } // U: add_val<<<1, 1>>>(variable, addition) // B: // A: *variabe += *addition __global__ void add_val(float *variable, float *addition){ *variable += *addition; } // U: z = posMod(n,m) // B: m > 0 // A: z = n%m if n>=0, z = n%m + m if n < 0 __device__ int posMod(int number, int modulus){ int result = number%modulus; if (result<0){ result +=modulus; } return result; } __device__ int indexMap(int xi, int yi, int zi){ xi = posMod(xi,grid_dim_x); yi = posMod(yi,grid_dim_x); zi = posMod(zi,grid_dim_x); int i = zi*grid_dim_z; if (zi%2==0){ i += yi*grid_dim_x; } else{ i += grid_dim_z-yi*grid_dim_x-grid_dim_x; } if ((yi+zi)%2 == 0){ i += xi; } else{ i += grid_dim_x-xi-1; } return i; }
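The Accumulator class in this file keeps only a running count, mean and sum of squared deviations, updating them one value at a time in the same spirit as Welford's online algorithm, so the mean and variance never require storing the samples. The standalone sketch below restates that update rule with a small usage example; it is written from the formulas above, not copied from either file.

#include <cstdio>
#include <initializer_list>

// Running mean/variance with the same update rule as Accumulator::addDataValue.
struct RunningStats {
    int    n = 0;
    double mean = 0.0;
    double sqDev = 0.0;           // sum of squared deviations from the running mean

    void add(double x) {
        ++n;
        sqDev += double(n - 1) / n * (x - mean) * (x - mean);
        mean  += (x - mean) / n;
    }
    double variance() const { return sqDev / (n - 1); }   // requires n > 1
};

int main() {
    RunningStats stats;
    for (double x : {2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0}) stats.add(x);
    std::printf("mean = %f, var = %f\n", stats.mean, stats.variance());  // 5.0, 4.571...
    return 0;
}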
c40855ff28ebe8489d55c90494296435e0269191.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// Lattice dimensions and thread_num
#define thread_num 512// Must be 2^k
#define grid_dim_x 128
#define grid_dim_z 16384
#define grid_size 2097152
#define iter 250
#define iterbal 50

void calc_cpu(float B, float kT, float QX, float QZ, float *E_avg, float *M_avg, float *E_var, float *M_var);
__global__ void set_lattice(bool *lattice);
__global__ void iterate_grid(float B, float kT, float Q, float QZ, bool round, bool *dev_lattice, float *d_E_vec, float *d_M_vec, int seed);
__global__ void reset_vec(float *vec);
__global__ void vec_sum(float *vec, float *result);
__global__ void set_val(float *variable, float value);
__global__ void add_val(float *variable, float *addition);
__device__ int posMod(int number, int modulus);
__device__ int indexMap(int xi, int yi, int zi);

// Sample values are read into an Accumulator, which keeps track of the
// mean and variance of that sample.
class Accumulator
{
private:
    int N;
    float m;
    float s;
    // Data invariant:
    // N is the number of values in the sample that has been read into this Accumulator instance, N >= 0
    // m is the mean of the values in the sample that has been read into this Accumulator instance
    // s is the sum of squared deviations (each value's deviation from the mean, squared) over the
    //   sample that has been read into this Accumulator instance, s >= 0
public:
    // U: Accumulator a;
    // B: Nothing
    // A: a is a new Accumulator instance into which no values have been read.
    //    All data in a have been zeroed, i.e. a.N = 0, a.m = 0.0 and a.s = 0.0
    Accumulator()
    {
        N = 0;
        m = 0.0;
        s = 0.0;
    }

    // U: a.addDataValue(x)
    // B: Nothing
    // A: x has been added to the sample a
    void addDataValue(float x)
    {
        N++;
        s = s + 1.0*(N-1)/N*(x-m)*(x-m);
        m = m + (x-m)/N;
    }

    // U: x = a.mean()
    // B: Nothing
    // A: x contains the mean of the values in the sample a
    float mean()
    {
        return m;
    }

    // U: x = a.var()
    // B: N > 1
    // A: x contains the variance of the values in the sample a
    float var()
    {
        return s/(N-1);
    }

    // U: x = a.stddev()
    // B: N > 1
    // A: x contains the standard deviation of the values in the sample a
    float stddev()
    {
        return sqrt(s/(N-1));
    }
};

int main(){
    // Minimum and maximum values of B, and number of steps.
    // If Bsteps = 1, then only Bmin is used.
    float B;
    float Bmin = 0.0;
    float Bmax = 1.0;
    int Bsteps = 1;

    // Minimum and maximum values of kT, and number of steps.
    // If kTsteps = 1, then only kTmin is used.
    float kT;
    float kTmin = 0.5;
    float kTmax = 7.0;
    int kTsteps = 1;

    // Minimum and maximum values of QY, and number of steps.
    // If QYsteps = 1, then only Qmin is used.
    float QY;
    float QYmin = -1.0;
    float QYmax = 1.0;
    int QYsteps = 1;

    // Minimum and maximum values of QZ, and number of steps.
    // If QZsteps = 1, then only Qmin is used.
float QZ; float QZmin = -1.0; float QZmax = 1.0; int QZsteps = 1; srand(time(NULL)); // Seed CPU RNG float Emean; float Mmean; float Evar; float Mvar; char filename[20]; sprintf(filename, "results.dat"); FILE *fp; fp = fopen(filename, "w"); for (int i=0;i<Bsteps;i++){ // B loop if (Bsteps>1){ B = Bmin + i*(Bmax-Bmin)/(Bsteps-1); } else{ B = Bmin; } for (int l=0; l<QZsteps; l++){ // QZ loop if (QZsteps>1){ QZ = QZmin + l*(QZmax-QZmin)/(QZsteps-1); } else{ QZ = QZmin; } for(int k=0; k<QYsteps; k++){ // QY loop if (QYsteps>1){ QY = QYmin + k*(QYmax-QYmin)/(QYsteps-1); } else{ QY = QYmin; } for(int j=0; j<kTsteps; j++){ // kT loop if (kTsteps>1){ kT = kTmin + j*(kTmax-kTmin)/(kTsteps-1); } else{ kT = kTmin; } printf("Performing calculation at B=%g, kT=%g, QY=%g, QZ=%g\n", B, kT, QY, QZ); calc_cpu(B, kT, QY, QZ, &Emean, &Mmean, &Evar, &Mvar); fprintf(fp, "%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\n", B, kT, QY, QZ, Emean, Mmean, Evar, Mvar); } // kT loop end } // QY loop end } // QZ loop end } // B loop end fclose(fp); } // U: calc_cpu(...) // B: kT > 0, n => 0 // A: The results of an ising simulation at magnetic field B and // temperature kT have been stored in Earr[n] (mean energy) // and Marr[n] (mean magnetization) void calc_cpu(float B, float kT, float QX, float QZ, float *E_avg_out, float *M_avg_out, float *E_var_out, float *M_var_out){ // Degbug things // Template: // cudaMemcpy( &buggy, dev_value, sizeof(float), cudaMemcpyDeviceToHost); // printf("%g\n",buggy); /*float buggy;*/ /*float buggyvec[thread_num];*/ // Create, allocate memory for and set lattice bool *dev_lattice; cudaMalloc( (void**)&dev_lattice, grid_size*sizeof(bool) ); set_lattice<<<1, thread_num>>>(dev_lattice); float *dev_dEvec; float *dev_dMvec; cudaMalloc( (void**)&dev_dEvec, thread_num*sizeof(float) ); cudaMalloc( (void**)&dev_dMvec, thread_num*sizeof(float) ); float *dev_Etot; float *dev_Mtot; /*float *dev_Eavg;*/ /*float *dev_Mavg;*/ cudaMalloc( (void**)&dev_Etot, sizeof(float) ); cudaMalloc( (void**)&dev_Mtot, sizeof(float) ); /*cudaMalloc( (void**)&dev_Eavg, sizeof(float) );*/ /*cudaMalloc( (void**)&dev_Mavg, sizeof(float) );*/ set_val<<<1,1>>>(dev_Etot, grid_size*(-2.0-2.0*QX-2.0*QZ-B)); set_val<<<1,1>>>(dev_Mtot, grid_size); /*set_val<<<1,1>>>(dev_Eavg, 0.0);*/ /*set_val<<<1,1>>>(dev_Mavg, 0.0);*/ Accumulator energy; Accumulator magnet; float Etot; float Mtot; for (int j=0; j<iter; j++){ reset_vec<<<1, thread_num>>>(dev_dEvec); reset_vec<<<1, thread_num>>>(dev_dMvec); iterate_grid<<<1, thread_num>>>(B, kT, QX, QZ, 0, dev_lattice, dev_dEvec, dev_dMvec, rand() ); iterate_grid<<<1, thread_num>>>(B, kT, QX, QZ, 1, dev_lattice, dev_dEvec, dev_dMvec, rand() ); vec_sum<<<1, thread_num>>>(dev_dEvec, dev_Etot); vec_sum<<<1, thread_num>>>(dev_dMvec, dev_Mtot); if (j>iterbal){ cudaMemcpy( &Etot, dev_Etot, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy( &Mtot, dev_Mtot, sizeof(float), cudaMemcpyDeviceToHost); Etot = Etot/grid_size; Mtot = Mtot/grid_size; energy.addDataValue(Etot); magnet.addDataValue(Mtot); } } *E_avg_out = energy.mean(); *M_avg_out = magnet.mean(); *E_var_out = energy.var(); *M_var_out = magnet.var(); cudaFree(dev_lattice); cudaFree(dev_dEvec); cudaFree(dev_dMvec); cudaFree(dev_Etot); cudaFree(dev_Mtot); } // U: set_lattice<<<1, thread_num>>>(dev_lattice); // B: dev_lattice points to allocated device memory for grid_size bool numbers // A: all elements of dev_lattice are set to 1 __global__ void set_lattice(bool *lattice){ int tid = threadIdx.x; for (int i=tid;i<grid_size;i+=thread_num){ 
lattice[i] = 1; } } // U: iterate_grid<<<1, thread_num>>>(...) // B: // A: One ising iteration has been performed over a checkerboard. If round=0 it's over the white squares, if round=1 it's over // the black squares. The change done by each thread has been added to d_E_vec[tid] and d_M_vec[tid] __global__ void iterate_grid(float B, float kT, float QX, float QZ, bool round, bool *dev_lattice, float *d_E_vec, float *d_M_vec, int seed){ int tid=threadIdx.x; curandState_t state; curand_init(seed+tid, 0, 0, &state); int si; float ssum; float delta_E; float delta_M; float p; float r; int xi; int yi; int zi; for (int i=round+2*tid;i<grid_size;i+=2*thread_num){ zi = i/grid_dim_z; if ((zi%2)==0){ yi = (i%grid_dim_z)/grid_dim_x; } else{ yi = grid_dim_x-(i%grid_dim_z)/grid_dim_x-1; } if ((yi+zi)%2 == 0){ xi = i%grid_dim_x; } else{ xi = grid_dim_x - i%grid_dim_x - 1; } si = 2*dev_lattice[i]-1; ssum = 2*dev_lattice[indexMap(xi-1,yi,zi)] +2*dev_lattice[indexMap(xi+1,yi,zi)] -2 +QX*2*dev_lattice[indexMap(xi,yi-1,zi)] +QX*2*dev_lattice[indexMap(xi,yi+1,zi)] -QX*2 +QZ*2*dev_lattice[indexMap(xi,yi,zi-1)] +QZ*2*dev_lattice[indexMap(xi,yi,zi+1)] -QZ*2; delta_E = 2*si*(ssum+B); delta_M = -2*si; if (delta_E < 0){ p = 1; } else{ p = exp(-delta_E/kT); } r = curand_uniform(&state); if (r<p){ // Spin flip! d_E_vec[tid] += delta_E; d_M_vec[tid] += delta_M; dev_lattice[i] = !( dev_lattice[i] ); } } } // U: reset_vec<<<1, thread_num>>>(dev_vec) // B: dev_vec has been allocated device memory for thread_num float numbers // A: All elements of dev_vec have been set as 0.0 __global__ void reset_vec(float *vec){ vec[threadIdx.x] = 0.0; } // U: vec_sum<<<1, thread_num>>>(dev_vec, dev_result) // B: dev_vec has length thread_num // A: The sum of elements in dev_vec has been added to result __global__ void vec_sum(float *vec, float *result){ // Right multithread version (has to use threads) int tid = threadIdx.x; int offset = thread_num>>1; while (offset>0){ if (tid < offset){ vec[tid] += vec[tid+offset]; } __syncthreads(); offset=offset>>1; } if (tid==0){ *result += vec[0]; } // Right single thread version /*int tid = threadIdx.x;*/ /*if (tid == 0){*/ /*for (int i=1;i<thread_num;i++){*/ /*vec[0] += vec[i];*/ /*}*/ /**result += vec[0];*/ /*}*/ } // U: set_val<<<1, 1>>>(variable, value) // B: // A: *variable = value __global__ void set_val(float *variable, float value){ *variable = value; } // U: add_val<<<1, 1>>>(variable, addition) // B: // A: *variabe += *addition __global__ void add_val(float *variable, float *addition){ *variable += *addition; } // U: z = posMod(n,m) // B: m > 0 // A: z = n%m if n>=0, z = n%m + m if n < 0 __device__ int posMod(int number, int modulus){ int result = number%modulus; if (result<0){ result +=modulus; } return result; } __device__ int indexMap(int xi, int yi, int zi){ xi = posMod(xi,grid_dim_x); yi = posMod(yi,grid_dim_x); zi = posMod(zi,grid_dim_x); int i = zi*grid_dim_z; if (zi%2==0){ i += yi*grid_dim_x; } else{ i += grid_dim_z-yi*grid_dim_x-grid_dim_x; } if ((yi+zi)%2 == 0){ i += xi; } else{ i += grid_dim_x-xi-1; } return i; }
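The Accumulator class in this file maintains a running count N, mean m and sum of squared deviations s, updated one value at a time (Welford's online method), so mean and variance are available without storing the whole sample. Below is a minimal standalone host-side sketch of the same update, using hypothetical sample values; it is only a reference for the formulas in addDataValue, not part of the simulation.

#include <cstdio>
#include <cmath>

int main() {
    const float sample[5] = {1.0f, 2.0f, 4.0f, 4.0f, 5.0f};  // hypothetical values
    int   N = 0;
    float m = 0.0f;   // running mean
    float s = 0.0f;   // running sum of squared deviations
    for (int i = 0; i < 5; i++) {
        float x = sample[i];
        N++;
        s += 1.0f*(N-1)/N*(x-m)*(x-m);  // same update order as Accumulator::addDataValue
        m += (x-m)/N;
    }
    printf("mean = %g, var = %g, stddev = %g\n", m, s/(N-1), sqrtf(s/(N-1)));
    return 0;
}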
d2c724cd55dc0f4f5cecdc833eadbdb3080f5c35.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2//64

__device__ int bar () {
    return 0;
}

__global__ void foo() {
    __assert(bar () !=0);
}
d2c724cd55dc0f4f5cecdc833eadbdb3080f5c35.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64

__device__ int bar () {
    return 0;
}

__global__ void foo() {
    __assert(bar () !=0);
}
c26c3123ecaecf5f98d807be3a83a8cfb22d3ec9.hip
// !!! This is a file automatically generated by hipify!!!
/* Host code for the Jacobi method of solving a system of linear equations
 * by iteration.
 * Build as follows: make clean && make
 * Author: Naga Kandasamy
 * Date modified: February 23, 2021
 *
 * Student name(s): Kevin Connell, Casey Adams
 * Date modified: 3/2/2021
 */

#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "jacobi_iteration.h"

/* Include the kernel code */
#include "jacobi_iteration_kernel.hip"

/* Uncomment the line below if you want the code to spit out debug information. */
/* #define DEBUG */

int main(int argc, char **argv)
{
    if (argc > 1) {
        printf("This program accepts no arguments\n");
        exit(EXIT_FAILURE);
    }

    matrix_t A;                    /* N x N constant matrix */
    matrix_t B;                    /* N x 1 b matrix */
    matrix_t reference_x;          /* Reference solution */
    matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel */
    matrix_t gpu_opt_solution_x;   /* Solution computed by optimized kernel */

    /* Initialize the random number generator */
    srand(time(NULL));

    /* Generate diagonally dominant matrix */
    printf("\nGenerating %d x %d system\n", MATRIX_SIZE, MATRIX_SIZE);
    A = create_diagonally_dominant_matrix(MATRIX_SIZE, MATRIX_SIZE);
    if (A.elements == NULL) {
        printf("Error creating matrix\n");
        exit(EXIT_FAILURE);
    }

    /* Create the other vectors */
    B = allocate_matrix_on_host(MATRIX_SIZE, 1, 1);
    reference_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
    gpu_naive_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
    gpu_opt_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);

#ifdef DEBUG
    print_matrix(A);
    print_matrix(B);
    print_matrix(reference_x);
#endif

    /* Compute Jacobi solution on CPU */
    printf("\nPerforming Jacobi iteration on the CPU\n");
    compute_gold(A, reference_x, B);
    display_jacobi_solution(A, reference_x, B); /* Display statistics */

    /* Compute Jacobi solution on device. Solutions are returned
       in gpu_naive_solution_x and gpu_opt_solution_x.
*/ printf("\nPerforming Jacobi iteration on device\n"); compute_on_device(A, gpu_naive_solution_x, gpu_opt_solution_x, B); display_jacobi_solution(A, gpu_naive_solution_x, B); /* Display statistics */ display_jacobi_solution(A, gpu_opt_solution_x, B); free(A.elements); free(B.elements); free(reference_x.elements); free(gpu_naive_solution_x.elements); free(gpu_opt_solution_x.elements); exit(EXIT_SUCCESS); } void compute_on_device(const matrix_t A, matrix_t gpu_naive_sol_x, matrix_t gpu_opt_sol_x, const matrix_t B) { compute_native(A, gpu_naive_sol_x, B); compute_optimized(A, gpu_opt_sol_x, B); return; } void compute_native(const matrix_t A, matrix_t gpu_naive_sol_x, const matrix_t B) { for (int i = 0; i < A.num_rows; i++) { gpu_naive_sol_x.elements[i] = B.elements[i]; } int num_threads = min((int)THREAD_BLOCK_1D_SIZE, (int)MATRIX_SIZE); dim3 threads(num_threads, 1, 1); dim3 grid(MATRIX_SIZE / threads.x, 1); matrix_t d_A = allocate_matrix_on_device(A); matrix_t d_B = allocate_matrix_on_device(B); matrix_t d_gpu_naive_sol_x = allocate_matrix_on_device(gpu_naive_sol_x); int *d_mutex; double *d_ssd; double ssd; unsigned int num_iter = 0; hipMalloc((void **)&d_mutex, sizeof(int)); hipMemset(d_mutex, 0, sizeof(int)); hipMalloc((void **)&d_ssd, sizeof(double)); copy_matrix_to_device(d_A, A); copy_matrix_to_device(d_B, B); copy_matrix_to_device(d_gpu_naive_sol_x, gpu_naive_sol_x); struct timeval start, stop; gettimeofday(&start, NULL); while(1) { hipMemset(d_ssd, 0, sizeof(double)); hipLaunchKernelGGL(( jacobi_iteration_kernel_naive), dim3(grid), dim3(threads), 0, 0, d_A.elements, d_B.elements, d_A.num_columns, d_A.num_rows, d_gpu_naive_sol_x.elements, d_ssd, d_mutex); hipDeviceSynchronize(); hipMemcpy(&ssd, d_ssd, sizeof(double), hipMemcpyDeviceToHost); num_iter++; if (sqrt(ssd) <= THRESHOLD) { break; } } hipDeviceSynchronize(); gettimeofday(&stop, NULL); printf("\nNaive convergence achieved after %d iterations \n", num_iter); printf("Naive execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000)); copy_matrix_from_device(gpu_naive_sol_x, d_gpu_naive_sol_x); hipFree(d_gpu_naive_sol_x.elements); hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_mutex); hipFree(d_ssd); return; } void compute_optimized(const matrix_t A, matrix_t gpu_opt_sol_x, const matrix_t B) { int num_threads = min((int)THREAD_BLOCK_1D_SIZE, (int)MATRIX_SIZE); dim3 threads(num_threads, 1, 1); dim3 grid(MATRIX_SIZE / threads.x, 1); matrix_t d_A = allocate_matrix_on_device(A); matrix_t col_major_A = allocate_matrix_on_host(MATRIX_SIZE, MATRIX_SIZE, 0); matrix_t d_col_major_A = allocate_matrix_on_device(col_major_A); copy_matrix_to_device(d_A, A); copy_matrix_to_device(d_col_major_A, col_major_A); int num_threadsrc = min((int)THREAD_BLOCK_2D_SIZE, (int)MATRIX_SIZE); dim3 row_to_col_threads(num_threadsrc, num_threadsrc, 1); dim3 row_to_col_grid(MATRIX_SIZE / row_to_col_threads.x, MATRIX_SIZE / row_to_col_threads.y, 1); hipLaunchKernelGGL(( row_to_col_major_kernel), dim3(row_to_col_grid), dim3(row_to_col_threads), 0, 0, d_A.elements, d_A.num_columns, d_A.num_rows, d_col_major_A.elements); hipDeviceSynchronize(); check_CUDA_error("Row to Column Major Kernel Launch Failure"); matrix_t d_B = allocate_matrix_on_device(B); matrix_t d_gpu_opt_sol_x = allocate_matrix_on_device(gpu_opt_sol_x); int *d_mutex; double *d_ssd; hipMalloc((void **)&d_mutex, sizeof(int)); hipMemset(d_mutex, 0, sizeof(int)); hipMalloc((void **)&d_ssd, sizeof(double)); for (int i = 0; i < B.num_rows; i++) { 
gpu_opt_sol_x.elements[i] = B.elements[i]; } copy_matrix_to_device(d_B, B); copy_matrix_to_device(d_gpu_opt_sol_x, gpu_opt_sol_x); double ssd; unsigned int num_iter = 0; struct timeval start, stop; gettimeofday(&start, NULL); while (1) { hipMemset(d_ssd, 0, sizeof(double)); hipLaunchKernelGGL(( jacobi_iteration_kernel_optimized), dim3(grid), dim3(threads), 0, 0, d_col_major_A.elements, d_B.elements, d_col_major_A.num_columns, d_col_major_A.num_rows, d_gpu_opt_sol_x.elements, d_ssd, d_mutex); hipDeviceSynchronize(); hipMemcpy(&ssd, d_ssd, sizeof(double), hipMemcpyDeviceToHost); num_iter++; if (sqrt(ssd) <= THRESHOLD) { break; } } hipDeviceSynchronize(); gettimeofday(&stop, NULL); printf("\nOptimized convergence achieved after %d iterations \n", num_iter); printf("Optimized execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000)); copy_matrix_from_device(gpu_opt_sol_x, d_gpu_opt_sol_x); hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_gpu_opt_sol_x.elements); hipFree(d_mutex); hipFree(d_ssd); return; } /* Allocate matrix on the device of same size as M */ matrix_t allocate_matrix_on_device(const matrix_t M) { matrix_t Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); hipMalloc((void **)&Mdevice.elements, size); return Mdevice; } /* Allocate a matrix of dimensions height * width. If init == 0, initialize to all zeroes. If init == 1, perform random initialization. */ matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float *)malloc(size * sizeof(float)); for (unsigned int i = 0; i < size; i++) { if (init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } /* Copy matrix to device */ void copy_matrix_to_device(matrix_t Mdevice, const matrix_t Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); return; } /* Copy matrix from device to host */ void copy_matrix_from_device(matrix_t Mhost, const matrix_t Mdevice) { int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); return; } /* Prints the matrix out to screen */ void print_matrix(const matrix_t M) { for (unsigned int i = 0; i < M.num_rows; i++) { for (unsigned int j = 0; j < M.num_columns; j++) { printf("%f ", M.elements[i * M.num_columns + j]); } printf("\n"); } printf("\n"); return; } /* Returns a floating-point value between [min, max] */ float get_random_number(int min, int max) { float r = rand()/(float)RAND_MAX; return (float)floor((double)(min + (max - min + 1) * r)); } /* Check for errors in kernel execution */ void check_CUDA_error(const char *msg) { hipError_t err = hipGetLastError(); if ( hipSuccess != err) { printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } return; } /* Create diagonally dominant matrix */ matrix_t create_diagonally_dominant_matrix(unsigned int num_rows, unsigned int num_columns) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; unsigned int size = M.num_rows * M.num_columns; M.elements = (float *)malloc(size * sizeof(float)); if (M.elements == NULL) return M; /* Create a matrix with random numbers between [-.5 and .5] */ unsigned int i, j; for (i 
= 0; i < size; i++) M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER); /* Make diagonal entries large with respect to the entries on each row. */ for (i = 0; i < num_rows; i++) { float row_sum = 0.0; for (j = 0; j < num_columns; j++) { row_sum += fabs(M.elements[i * M.num_rows + j]); } M.elements[i * M.num_rows + i] = 0.5 + row_sum; } return M; }
c26c3123ecaecf5f98d807be3a83a8cfb22d3ec9.cu
/* Host code for the Jacobi method of solving a system of linear equations
 * by iteration.
 * Build as follows: make clean && make
 * Author: Naga Kandasamy
 * Date modified: February 23, 2021
 *
 * Student name(s): Kevin Connell, Casey Adams
 * Date modified: 3/2/2021
 */

#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include "jacobi_iteration.h"

/* Include the kernel code */
#include "jacobi_iteration_kernel.cu"

/* Uncomment the line below if you want the code to spit out debug information. */
/* #define DEBUG */

int main(int argc, char **argv)
{
    if (argc > 1) {
        printf("This program accepts no arguments\n");
        exit(EXIT_FAILURE);
    }

    matrix_t A;                    /* N x N constant matrix */
    matrix_t B;                    /* N x 1 b matrix */
    matrix_t reference_x;          /* Reference solution */
    matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel */
    matrix_t gpu_opt_solution_x;   /* Solution computed by optimized kernel */

    /* Initialize the random number generator */
    srand(time(NULL));

    /* Generate diagonally dominant matrix */
    printf("\nGenerating %d x %d system\n", MATRIX_SIZE, MATRIX_SIZE);
    A = create_diagonally_dominant_matrix(MATRIX_SIZE, MATRIX_SIZE);
    if (A.elements == NULL) {
        printf("Error creating matrix\n");
        exit(EXIT_FAILURE);
    }

    /* Create the other vectors */
    B = allocate_matrix_on_host(MATRIX_SIZE, 1, 1);
    reference_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
    gpu_naive_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);
    gpu_opt_solution_x = allocate_matrix_on_host(MATRIX_SIZE, 1, 0);

#ifdef DEBUG
    print_matrix(A);
    print_matrix(B);
    print_matrix(reference_x);
#endif

    /* Compute Jacobi solution on CPU */
    printf("\nPerforming Jacobi iteration on the CPU\n");
    compute_gold(A, reference_x, B);
    display_jacobi_solution(A, reference_x, B); /* Display statistics */

    /* Compute Jacobi solution on device. Solutions are returned
       in gpu_naive_solution_x and gpu_opt_solution_x.
*/ printf("\nPerforming Jacobi iteration on device\n"); compute_on_device(A, gpu_naive_solution_x, gpu_opt_solution_x, B); display_jacobi_solution(A, gpu_naive_solution_x, B); /* Display statistics */ display_jacobi_solution(A, gpu_opt_solution_x, B); free(A.elements); free(B.elements); free(reference_x.elements); free(gpu_naive_solution_x.elements); free(gpu_opt_solution_x.elements); exit(EXIT_SUCCESS); } void compute_on_device(const matrix_t A, matrix_t gpu_naive_sol_x, matrix_t gpu_opt_sol_x, const matrix_t B) { compute_native(A, gpu_naive_sol_x, B); compute_optimized(A, gpu_opt_sol_x, B); return; } void compute_native(const matrix_t A, matrix_t gpu_naive_sol_x, const matrix_t B) { for (int i = 0; i < A.num_rows; i++) { gpu_naive_sol_x.elements[i] = B.elements[i]; } int num_threads = min((int)THREAD_BLOCK_1D_SIZE, (int)MATRIX_SIZE); dim3 threads(num_threads, 1, 1); dim3 grid(MATRIX_SIZE / threads.x, 1); matrix_t d_A = allocate_matrix_on_device(A); matrix_t d_B = allocate_matrix_on_device(B); matrix_t d_gpu_naive_sol_x = allocate_matrix_on_device(gpu_naive_sol_x); int *d_mutex; double *d_ssd; double ssd; unsigned int num_iter = 0; cudaMalloc((void **)&d_mutex, sizeof(int)); cudaMemset(d_mutex, 0, sizeof(int)); cudaMalloc((void **)&d_ssd, sizeof(double)); copy_matrix_to_device(d_A, A); copy_matrix_to_device(d_B, B); copy_matrix_to_device(d_gpu_naive_sol_x, gpu_naive_sol_x); struct timeval start, stop; gettimeofday(&start, NULL); while(1) { cudaMemset(d_ssd, 0, sizeof(double)); jacobi_iteration_kernel_naive<<<grid, threads>>>(d_A.elements, d_B.elements, d_A.num_columns, d_A.num_rows, d_gpu_naive_sol_x.elements, d_ssd, d_mutex); cudaDeviceSynchronize(); cudaMemcpy(&ssd, d_ssd, sizeof(double), cudaMemcpyDeviceToHost); num_iter++; if (sqrt(ssd) <= THRESHOLD) { break; } } cudaThreadSynchronize(); gettimeofday(&stop, NULL); printf("\nNaive convergence achieved after %d iterations \n", num_iter); printf("Naive execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000)); copy_matrix_from_device(gpu_naive_sol_x, d_gpu_naive_sol_x); cudaFree(d_gpu_naive_sol_x.elements); cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_mutex); cudaFree(d_ssd); return; } void compute_optimized(const matrix_t A, matrix_t gpu_opt_sol_x, const matrix_t B) { int num_threads = min((int)THREAD_BLOCK_1D_SIZE, (int)MATRIX_SIZE); dim3 threads(num_threads, 1, 1); dim3 grid(MATRIX_SIZE / threads.x, 1); matrix_t d_A = allocate_matrix_on_device(A); matrix_t col_major_A = allocate_matrix_on_host(MATRIX_SIZE, MATRIX_SIZE, 0); matrix_t d_col_major_A = allocate_matrix_on_device(col_major_A); copy_matrix_to_device(d_A, A); copy_matrix_to_device(d_col_major_A, col_major_A); int num_threadsrc = min((int)THREAD_BLOCK_2D_SIZE, (int)MATRIX_SIZE); dim3 row_to_col_threads(num_threadsrc, num_threadsrc, 1); dim3 row_to_col_grid(MATRIX_SIZE / row_to_col_threads.x, MATRIX_SIZE / row_to_col_threads.y, 1); row_to_col_major_kernel<<<row_to_col_grid, row_to_col_threads>>>(d_A.elements, d_A.num_columns, d_A.num_rows, d_col_major_A.elements); cudaDeviceSynchronize(); check_CUDA_error("Row to Column Major Kernel Launch Failure"); matrix_t d_B = allocate_matrix_on_device(B); matrix_t d_gpu_opt_sol_x = allocate_matrix_on_device(gpu_opt_sol_x); int *d_mutex; double *d_ssd; cudaMalloc((void **)&d_mutex, sizeof(int)); cudaMemset(d_mutex, 0, sizeof(int)); cudaMalloc((void **)&d_ssd, sizeof(double)); for (int i = 0; i < B.num_rows; i++) { gpu_opt_sol_x.elements[i] = B.elements[i]; } 
copy_matrix_to_device(d_B, B); copy_matrix_to_device(d_gpu_opt_sol_x, gpu_opt_sol_x); double ssd; unsigned int num_iter = 0; struct timeval start, stop; gettimeofday(&start, NULL); while (1) { cudaMemset(d_ssd, 0, sizeof(double)); jacobi_iteration_kernel_optimized<<<grid, threads>>>(d_col_major_A.elements, d_B.elements, d_col_major_A.num_columns, d_col_major_A.num_rows, d_gpu_opt_sol_x.elements, d_ssd, d_mutex); cudaDeviceSynchronize(); cudaMemcpy(&ssd, d_ssd, sizeof(double), cudaMemcpyDeviceToHost); num_iter++; if (sqrt(ssd) <= THRESHOLD) { break; } } cudaThreadSynchronize(); gettimeofday(&stop, NULL); printf("\nOptimized convergence achieved after %d iterations \n", num_iter); printf("Optimized execution time = %fs\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000)); copy_matrix_from_device(gpu_opt_sol_x, d_gpu_opt_sol_x); cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_gpu_opt_sol_x.elements); cudaFree(d_mutex); cudaFree(d_ssd); return; } /* Allocate matrix on the device of same size as M */ matrix_t allocate_matrix_on_device(const matrix_t M) { matrix_t Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); cudaMalloc((void **)&Mdevice.elements, size); return Mdevice; } /* Allocate a matrix of dimensions height * width. If init == 0, initialize to all zeroes. If init == 1, perform random initialization. */ matrix_t allocate_matrix_on_host(int num_rows, int num_columns, int init) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float *)malloc(size * sizeof(float)); for (unsigned int i = 0; i < size; i++) { if (init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } /* Copy matrix to device */ void copy_matrix_to_device(matrix_t Mdevice, const matrix_t Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); return; } /* Copy matrix from device to host */ void copy_matrix_from_device(matrix_t Mhost, const matrix_t Mdevice) { int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); return; } /* Prints the matrix out to screen */ void print_matrix(const matrix_t M) { for (unsigned int i = 0; i < M.num_rows; i++) { for (unsigned int j = 0; j < M.num_columns; j++) { printf("%f ", M.elements[i * M.num_columns + j]); } printf("\n"); } printf("\n"); return; } /* Returns a floating-point value between [min, max] */ float get_random_number(int min, int max) { float r = rand()/(float)RAND_MAX; return (float)floor((double)(min + (max - min + 1) * r)); } /* Check for errors in kernel execution */ void check_CUDA_error(const char *msg) { cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err) { printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } return; } /* Create diagonally dominant matrix */ matrix_t create_diagonally_dominant_matrix(unsigned int num_rows, unsigned int num_columns) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; unsigned int size = M.num_rows * M.num_columns; M.elements = (float *)malloc(size * sizeof(float)); if (M.elements == NULL) return M; /* Create a matrix with random numbers between [-.5 and .5] */ unsigned int i, j; for (i = 0; i < size; i++) M.elements[i] = get_random_number 
(MIN_NUMBER, MAX_NUMBER); /* Make diagonal entries large with respect to the entries on each row. */ for (i = 0; i < num_rows; i++) { float row_sum = 0.0; for (j = 0; j < num_columns; j++) { row_sum += fabs(M.elements[i * M.num_rows + j]); } M.elements[i * M.num_rows + i] = 0.5 + row_sum; } return M; }
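The host loops above repeatedly launch jacobi_iteration_kernel_naive / jacobi_iteration_kernel_optimized and stop once sqrt(ssd) drops below THRESHOLD, but the kernels themselves live in jacobi_iteration_kernel.cu, which is included and not reproduced in this listing. The following is only a minimal sketch of one way a per-row Jacobi kernel with an SSD reduction could be written, not the course kernel: the kernel name, the separate x_new buffer, and the use of atomicAdd on double (which requires sm_60 or newer) are all assumptions made for illustration.

// Hypothetical sketch only -- not the kernel from jacobi_iteration_kernel.cu.
// Assumes row-major A, one thread per row, a separate x_new buffer, and
// hardware support for atomicAdd on double.
__global__ void jacobi_row_sketch(const float *A, const float *b,
                                  int num_rows, int num_cols,
                                  const float *x_old, float *x_new,
                                  double *ssd)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows)
        return;

    /* x_i^(k+1) = (b_i - sum_{j != i} A_ij * x_j^(k)) / A_ii */
    float sigma = 0.0f;
    for (int j = 0; j < num_cols; j++)
        if (j != row)
            sigma += A[row * num_cols + j] * x_old[j];

    float updated = (b[row] - sigma) / A[row * num_cols + row];
    x_new[row] = updated;

    /* Accumulate the squared update for the host-side convergence test. */
    double diff = (double)updated - (double)x_old[row];
    atomicAdd(ssd, diff * diff);
}

A host loop built around this sketch would swap x_old and x_new between launches; the host code above instead passes a single solution vector plus a mutex, so the actual kernel's update and reduction strategy necessarily differ from this sketch.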
93d847ec1bfc6dcc8e64f0656b28720b8db4b898.hip
// !!! This is a file automatically generated by hipify!!!
#include "common_hip.cuh"

// The current ray and its payload.
rtDeclareVariable(optix::Ray, ray, rtCurrentRay , );
rtDeclareVariable(RayPayload, payload, rtPayload, );

// Environment map texture sampler.
rtTextureSampler<float4, 2> env_map;

RT_PROGRAM void miss_environment_constant()
{
    // Azimuth; angle from the ray's z-axis (ccw) to (x, z). Reference: https://www.wikiwand.com/en/Spherical_coordinate_system
    float theta = atan2f(ray.direction.x, ray.direction.z);
    // Altitude; angle from y-axis (down) to (x, y, z).
    float phi = 0.5f * M_PIf - acosf(ray.direction.y); // NOTE: no division by ray length, since it is normalized.

    // Derive texture coordinates. NOTE: M_1_PIf = 1 / M_PIf
    float u = 0.5f * theta * M_1_PIf;   // atan2f returns "theta" in (-pi, pi], hence "0.5 * theta / pi" is in (-0.5, 0.5]
    float v = 0.5f * (1.0f + sin(phi)); // "sin(phi)" is in [-1.0, 1.0], hence "0.5 + 0.5 * sin(phi)" is in [0.0, 1.0]

    // Artificially brighten the image a bit.
    float3 ambient_term = make_float3(0.1f, 0.1f, 0.1f);
    payload.radiance = make_float3(tex2D(env_map, u, v)) + ambient_term;
}
93d847ec1bfc6dcc8e64f0656b28720b8db4b898.cu
#include "common.cuh" // The current ray and its payload. rtDeclareVariable(optix::Ray, ray, rtCurrentRay , ); rtDeclareVariable(RayPayload, payload, rtPayload, ); // Environment map texture sampler. rtTextureSampler<float4, 2> env_map; RT_PROGRAM void miss_environment_constant() { // Azimuth; angle from the ray's z-axis (ccw) to (x, z). Reference: https://www.wikiwand.com/en/Spherical_coordinate_system float theta = atan2f(ray.direction.x, ray.direction.z); // Altitude; angle from y-axis (down) to (x, y, z). float phi = 0.5f * M_PIf - acosf(ray.direction.y); // NOTE: no division by ray length, since it is normalized. // Derive texture coordinates. NOTE: M_1_PIf = 1 / M_PIf float u = 0.5f * theta * M_1_PIf; // "theta" is in [0.0, 2pi], hence "0.5 * theta / pi" is in [0.0, 1.0] float v = 0.5f * (1.0f + sin(phi)); // "sin(phi)" is in [-1.0, 1.0], hence "0.5 + 0.5 * sin(phi)" is in [0.0, 1.0] // Artificially brighten the image a bit. float3 ambient_term = make_float3(0.1f, 0.1f, 0.1f); payload.radiance = make_float3(tex2D(env_map, u, v)) + ambient_term; }
8a4d8d38b0a101bf81e2bd5a8562b37825a2250d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; const int_tp h = (index / width) % height; const int_tp n = index / width / height; const int_tp offset = (n * channels * height + h) * width + w; const int_tp step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int_tp head = 0; const int_tp pre_pad = (size - 1) / 2; const int_tp post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } #endif // USE_ROCM template<typename Dtype> void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL)<< "Unknown normalization region."; } } // TODO: check if it would be faster to just put it into the previous kernel. #ifdef USE_ROCM template<typename Dtype> __global__ void LRNComputeOutput(const int_tp nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } #endif // USE_ROCM template<typename Dtype> void LRNLayer<Dtype>::CrossChannelForward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
int_tp n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); int_tp n_threads = num_ * height_ * width_; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_fill_scale")); viennacl::ocl::enqueue( oclk_lrn_fill(n_threads, WrapHandle((cl_mem) bottom_data, &ctx), num_, channels_, height_, width_, size_, alpha_ / size_, k_, WrapHandle((cl_mem) scale_data, &ctx)), ctx.get_queue()); n_threads = bottom[0]->count(); viennacl::ocl::kernel &oclk_lrn_compute = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_output")); viennacl::ocl::enqueue( oclk_lrn_compute(n_threads, WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) scale_data, &ctx), -beta_, WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); #endif // USE_GREENTEA } } template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template<typename Dtype> void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL)<< "Unknown normalization region."; } } #ifdef USE_ROCM template<typename Dtype> __global__ void LRNComputeDiff(const int_tp nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; const int_tp h = (index / width) % height; const int_tp n = index / width / height; const int_tp offset = (n * channels * height + h) * width + w; const int_tp step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int_tp head = 0; const int_tp pre_pad = size - (size + 1) / 2; const int_tp post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * 
step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } #endif // USE_ROCM template<typename Dtype> void LRNLayer<Dtype>::CrossChannelBackward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int_tp n_threads = num_ * height_ * width_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_lrn = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_diff")); viennacl::ocl::enqueue( oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx), WrapHandle((cl_mem) (top[0]->gpu_data()), &ctx), WrapHandle((cl_mem) (scale_.gpu_data()), &ctx), WrapHandle((cl_mem) (top[0]->gpu_diff()), &ctx), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), &ctx)), ctx.get_queue()); #endif // USE_GREENTEA } } template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe
8a4d8d38b0a101bf81e2bd5a8562b37825a2250d.cu
#include <vector> #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; const int_tp h = (index / width) % height; const int_tp n = index / width / height; const int_tp offset = (n * channels * height + h) * width + w; const int_tp step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int_tp head = 0; const int_tp pre_pad = (size - 1) / 2; const int_tp post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } #endif // USE_CUDA template<typename Dtype> void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL)<< "Unknown normalization region."; } } // TODO: check if it would be faster to just put it into the previous kernel. #ifdef USE_CUDA template<typename Dtype> __global__ void LRNComputeOutput(const int_tp nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } #endif // USE_CUDA template<typename Dtype> void LRNLayer<Dtype>::CrossChannelForward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
int_tp n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); int_tp n_threads = num_ * height_ * width_; viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel( CL_KERNEL_SELECT("lrn_fill_scale")); viennacl::ocl::enqueue( oclk_lrn_fill(n_threads, WrapHandle((cl_mem) bottom_data, &ctx), num_, channels_, height_, width_, size_, alpha_ / size_, k_, WrapHandle((cl_mem) scale_data, &ctx)), ctx.get_queue()); n_threads = bottom[0]->count(); viennacl::ocl::kernel &oclk_lrn_compute = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_output")); viennacl::ocl::enqueue( oclk_lrn_compute(n_threads, WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) scale_data, &ctx), -beta_, WrapHandle((cl_mem) top_data, &ctx)), ctx.get_queue()); #endif // USE_GREENTEA } } template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template<typename Dtype> void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL)<< "Unknown normalization region."; } } #ifdef USE_CUDA template<typename Dtype> __global__ void LRNComputeDiff(const int_tp nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int_tp num, const int_tp channels, const int_tp height, const int_tp width, const int_tp size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int_tp w = index % width; const int_tp h = (index / width) % height; const int_tp n = index / width / height; const int_tp offset = (n * channels * height + h) * width + w; const int_tp step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int_tp head = 0; const int_tp pre_pad = size - (size + 1) / 2; const int_tp post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * 
step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } #endif // USE_CUDA template<typename Dtype> void LRNLayer<Dtype>::CrossChannelBackward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int_tp n_threads = num_ * height_ * width_; if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS)( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_lrn = program.get_kernel( CL_KERNEL_SELECT("lrn_compute_diff")); viennacl::ocl::enqueue( oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx), WrapHandle((cl_mem) (top[0]->gpu_data()), &ctx), WrapHandle((cl_mem) (scale_.gpu_data()), &ctx), WrapHandle((cl_mem) (top[0]->gpu_diff()), &ctx), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), &ctx)), ctx.get_queue()); #endif // USE_GREENTEA } } template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe
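For reference, the two forward kernels above implement the usual cross-channel LRN: at every spatial location, LRNFillScale builds scale_c = k + (alpha/size) * sum of in_j^2 over a window of `size` consecutive channels beginning (size-1)/2 channels below c, and LRNComputeOutput then applies out = in * scale^(-beta). Below is a scalar host-side reference of that computation for a single pixel column; the function name and signature are illustrative only and are not part of Caffe.

// Hypothetical scalar reference, not part of Caffe: cross-channel LRN for one
// spatial location, matching LRNFillScale / LRNComputeOutput above.
#include <cmath>
#include <vector>

std::vector<float> lrn_across_channels(const std::vector<float> &in,
                                       int size, float alpha, float beta, float k)
{
    const int channels = (int)in.size();
    const int pre_pad  = (size - 1) / 2;
    std::vector<float> out(channels);
    for (int c = 0; c < channels; ++c) {
        float accum = 0.0f;
        for (int j = c - pre_pad; j <= c - pre_pad + size - 1; ++j)
            if (j >= 0 && j < channels)
                accum += in[j] * in[j];
        float scale = k + accum * alpha / size;
        out[c] = in[c] * std::pow(scale, -beta);
    }
    return out;
}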
0a53fc5fdd7deea7d3b0fa4c38013b94574e9a11.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * a simple test
 */
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];

__device__ void mult(__shared__ float d1[32][32], __shared__ float d2[32][32],
                     __shared__ float d3[32][32], int idx)
{
    int i;
    for (i = 0; i < 31; i++) {
        d1[idx][i] = d2[idx+1][i-1] + d2[idx][i-1] + d2[idx-1][i-1]
                   + d2[idx+1][i]   + d2[idx][i]   + d2[idx-1][i]
                   + d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1];
    }
}

__global__ void doit(int start, int end)
{
    int i;
    for (i = start; i < end; i++) {
        mult(data1, data2, data3, i);
    }
}
0a53fc5fdd7deea7d3b0fa4c38013b94574e9a11.cu
/*
 * a simple test
 */
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];

__device__ void mult(__shared__ float d1[32][32], __shared__ float d2[32][32],
                     __shared__ float d3[32][32], int idx)
{
    int i;
    for (i = 0; i < 31; i++) {
        d1[idx][i] = d2[idx+1][i-1] + d2[idx][i-1] + d2[idx-1][i-1]
                   + d2[idx+1][i]   + d2[idx][i]   + d2[idx-1][i]
                   + d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1];
    }
}

__global__ void doit(int start, int end)
{
    int i;
    for (i = start; i < end; i++) {
        mult(data1, data2, data3, i);
    }
}
93f17e0433af518d2f49b484de125480a4c7e81b.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include<limits> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include <algorithm> using namespace std; // 8 byte. how to be 128byte? // Parameter need to restruct. //2 bytes, 2 bytes, 4 bytes, 4 bytes, 4 bytes. struct NUM_ADD { short2 read_haplotype; int Read_array; int read_large_length; }; double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } __constant__ float constant[10]; __constant__ int constant_int[10]; __global__ void pairHMM( int size, char * data, NUM_ADD * num_add, float * result,float * MG,float * DG, float * IG ) // what is the maximum number of parameters? { //MG, DG and IG are global memory to store indermediate result? //each thread finish one computation int offset=blockIdx.x*blockDim.x+threadIdx.x; MG=MG+offset; IG=IG+offset; DG=DG+offset; //if(threadIdx.x==0) //printf("%d %d %d %d %d\n", constant_int[0],constant_int[1], constant_int[2],constant_int[3], constant_int[4]); while(offset<size) { __shared__ float parameter1[512]; __shared__ float parameter2[512]; __shared__ float parameter3[512]; __shared__ float parameter4[512]; //NUM_ADD number_address; //number_address=num_add[offset];//get from global memory short2 read_haplotype_number=num_add[offset].read_haplotype; int read_large_length=num_add[offset].read_large_length; //read_haplotype_number.x=number_address.read_number; char4 * read_base_array=(char4 *)(data+num_add[offset].Read_array); // to caculate the address of read_base_array. float *parameter1_array=(float *) (read_base_array+(read_large_length+3)/4*32); read_large_length=read_large_length*32; float *parameter2_array=(float *) (parameter1_array+read_large_length); float *parameter3_array=(float *) (parameter1_array+read_large_length*2); float *parameter4_array=(float *) (parameter1_array+read_large_length*3); //read_haplotype_number.y=number_address.haplotype_number; char4 * haplotype_base_array=(char4 * )(parameter1_array+read_large_length*4); //haplotype is 4 byte. Thus, in a warp it is 4*32=128 byte. 
//we need to change the struct of haplotype float result_block=constant[5]; char4 read_base_4; int i; // this for loop it increase register number 29 //if number_address.read_number is even for(i=0;i<read_haplotype_number.x/4;i++) { //got read_base from globle memory (which is 32*4 (char4) = 128 bytes ) read_base_4=read_base_array[i*constant_int[2]]; int skip=i*constant_int[1]; parameter1[threadIdx.x]=parameter1_array[skip]; parameter2[threadIdx.x]=parameter2_array[skip]; parameter3[threadIdx.x]=parameter3_array[skip]; parameter4[threadIdx.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*2]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*2]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*2]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*2]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*3]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*3]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*3]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*3]=parameter4_array[skip]; float Ml=constant[5];// left M; float Dl=constant[5];// left D; float Il=constant[5];// left I float M2=constant[5]; //left M2 float D2=constant[5]; //left D2 float M3=constant[5]; float D3=constant[5]; float M4=constant[5]; float D4=constant[5]; float MU=constant[5];// up M; float IU=constant[5];// up I; float DU=constant[5];// up D; float MMID=constant[5]; float MMID2=constant[5]; float MMID3=constant[5]; float MMID4=constant[5]; //epsion=constant[4]; // beta=constant[3]; int hh=(read_haplotype_number.y+3)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { float Qm,Qm_1,alpha,delta,xiksi; if(j*4+kk==read_haplotype_number.y) break; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } else { DU= constant[0] /(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } Qm=parameter1[threadIdx.x]; delta=parameter2[threadIdx.x]; xiksi=parameter3[threadIdx.x]; alpha=parameter4[threadIdx.x]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); char4 read_haplotype_base; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; float aa=(read_haplotype_base.y==read_base_4.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Dl=__fmaf_rn(Dl,constant[4],DDM); Il=__fmaf_rn(MU,delta,IIMI); MMID=__fmaf_rn(alpha,MU,MIIDD); skip=threadIdx.x+blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; // epsion=0.1; // beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,Dl); DDM=__fmul_rn(M2,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4.y)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M2=__fmul_rn(aa,MMID2); D2=__fmaf_rn(D2,constant[4],DDM); Il=__fmaf_rn(Ml,delta,IIMI); MMID2=__fmaf_rn(alpha,Ml,MIIDD); skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; //epsion=0.1; //beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D2); DDM=__fmul_rn(M3,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4.z)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M3=__fmul_rn(aa,MMID3); D3=__fmaf_rn(D3,constant[4],DDM); Il=__fmaf_rn(M2,delta,IIMI); MMID3=__fmaf_rn(alpha,M2,MIIDD); skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; // epsion=0.1; // beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); //if(i==3) printf("M3=%e I3=%e D3=%e\n", M3,I3,D3); MID=__fadd_rn(Il,D3); DDM=__fmul_rn(M4,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4.w)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M4=__fmul_rn(aa,MMID4); D4=__fmaf_rn(D4,constant[4],DDM); Il=__fmaf_rn(M3,delta,IIMI); MMID4=__fmaf_rn(alpha,M3,MIIDD); if(i==read_haplotype_number.x/4-1 && read_haplotype_number.x%4==0) { result_block=__fadd_rn(result_block,__fadd_rn(M4,Il)); } else { MG[index]=M4; IG[index]=Il; DG[index]=D4; } }//4 }//haplotype } //following is only 56 registers for(i=i*4;i<read_haplotype_number.x;i++) { //char4 read_base_4; if(i%4==0) { read_base_4=read_base_array[i/4*constant_int[2]]; } char4 read_haplotype_base; if(i%4==0) read_haplotype_base.x=read_base_4.x; if(i%4==1) read_haplotype_base.x=read_base_4.y; if(i%4==2) read_haplotype_base.x=read_base_4.z; if(i%4==3) read_haplotype_base.x=read_base_4.w; float Qm,Qm_1,alpha,delta,xiksi; delta=parameter2_array[i*constant_int[2]]; xiksi=parameter3_array[i*constant_int[2]]; alpha=parameter4_array[i*constant_int[2]]; Qm=parameter1_array[i*constant_int[2]]; Qm_1=constant[1]-Qm; //epsion=constant[4]; //beta=constant[3]; Qm=fdividef(Qm,constant[2]); float Ml=0;// left M; float Dl=0;// left D; float Il=0; float MU=0;// up M; float IU=0;// up I; float DU=0;// up D; float MMID=0; if(i==0) { DU=constant[0]/(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } int hh=(read_haplotype_number.y+4-1)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { if(j*4+kk==read_haplotype_number.y) break; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); float aa=(read_haplotype_base.y==read_haplotype_base.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Il=__fmaf_rn(MU,delta,IIMI); Dl=__fmaf_rn(Dl,constant[4],DDM); MMID=__fmaf_rn(alpha,MU,MIIDD); if(i<read_haplotype_number.x-1) { MG[index]=Ml; IG[index]=Il; DG[index]=Dl; } else result_block=__fadd_rn(result_block,__fadd_rn(Ml,Il)); }//4 } //haplotype }//read result[offset]=result_block; offset+=gridDim.x*blockDim.x ; } } struct InputData { int read_size; char read_base[260]; char base_quals[260]; char ins_quals[260]; char del_quals[260]; char gcp_quals[260]; int haplotype_size; char haplotype_base[500]; }; bool operator<(const InputData &a, const InputData &b) { // return x.point_value > y.point_value; if(a.read_size<b.read_size) return true; if(a.read_size==b.read_size) return a.haplotype_size<b.haplotype_size; else return false; } int main(int argc, char * argv[]) { int INI=(log10f((std::numeric_limits<float>::max() / 16))); //printf("input value of size_each_for \n"); //scanf("%d", &size_each_for); struct timespec start,finish; double computation_time=0,mem_cpy_time=0,read_time=0, data_prepare=0; double total_time=0; FILE * file; float * MG; float * DG; float * IG; hipMalloc( (float **)& MG,sizeof(float) *128*90*500*3); DG=MG+90*128*500;// ???? IG=DG+90*128*500; //????? //file=fopen("../a.txt","r"); file=fopen("/data/04068/sren/dir_chromosome-10/b.txt","r"); // file=fopen(argv[1],"r"); //file=fopen("32_data.txt","r"); // file=fopen("less.txt","r"); int size; fscanf(file,"%d",&size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); float ph2pr_h[128]; for(int i=0;i<128;i++) { ph2pr_h[i]=powf(10.f, -((float)i) / 10.f); } hipError_t err; int constants_h_int[10]; float constants_h[10]; constants_h[0]=1.329228e+36; constants_h[1]=1.0; constants_h[2]=3.0; constants_h[3]=0.9; constants_h[4]=0.1; constants_h[5]=0.0; constants_h_int[0]=0; constants_h_int[1]=128; constants_h_int[2]=32; constants_h_int[3]=4; constants_h_int[4]=3; hipMemcpyToSymbol(constant,constants_h,sizeof(float)*10 ); hipMemcpyToSymbol(constant_int,constants_h_int,sizeof(int)*10 ); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); data_prepare+=diff(start,finish); int total=0; char * result_d_total; float read_read, haplotype_haplotype; while(!feof(file)) { total+=size; char useless; useless=fgetc(file); clock_gettime(CLOCK_MONOTONIC_RAW,&start); InputData *inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { int read_size; fscanf(file,"%d\n",&inputdata[i].read_size); fscanf(file,"%s ",inputdata[i].read_base); read_size=inputdata[i].read_size; read_read=read_size; for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i]. base_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].ins_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].del_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; if(j<read_size-1) fscanf(file,"%d ",&aa); else fscanf(file,"%d \n",&aa); inputdata[i].gcp_quals[j]=(char)aa; } fscanf(file,"%d\n",&inputdata[i].haplotype_size); fscanf(file, "%s\n",inputdata[i].haplotype_base); haplotype_haplotype=inputdata[i].haplotype_size; } clock_gettime(CLOCK_MONOTONIC_RAW,&finish); read_time+=diff(start,finish); float * result_h=(float *) malloc(sizeof(float)*size); struct timespec start_total,finish_total; clock_gettime(CLOCK_MONOTONIC_RAW,&start_total); char * data_h_total; std::sort(inputdata, inputdata+size); //32 one chunck. 
int malloc_size_for_each_chunk=(65*4*32+260*4*32*4+125*4*32) ; int total_size=(size+31)/32*malloc_size_for_each_chunk+(size*sizeof(NUM_ADD)+127)/128*128; data_h_total=(char*)malloc(total_size); err=hipMalloc( (char **) &result_d_total,total_size+size*sizeof(float)); if(err!=hipSuccess) printf("Error %d:%s !\n", err, hipGetErrorString(err)); char * data_d_total=result_d_total; float * result_d=(float *)(result_d_total+total_size);//last part is to store the result. char * data_h=data_h_total; char * data_h_begin=data_h; NUM_ADD *data_num_add=(NUM_ADD *) (data_h); data_h=data_h+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x .thus we donot need to worry about alignment. int data_size=0; //for each chunk int total_in_each=(size+31)/32; for(int i=0;i<total_in_each;i++) { //each is 32 //printf("total_in_each %d\n",total_in_each); //read_base int long_read_size=0; //to find the longest read_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_read_size<inputdata[i*32+j].read_size) long_read_size=inputdata[i*32+j].read_size; } int change_length=(long_read_size+3)/4;//because tile=4; each time deal with 4 read char4 read_base_data[32*65]; for(int kk=0;kk<change_length;kk++) { for(int dd=0;dd<32;dd++) // { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk*4) continue; else read_base_data[kk*32+dd].x=inputdata[i*32+dd].read_base[kk*4]; if(inputdata[i*32+dd].read_size<=kk*4+1) continue; else read_base_data[kk*32+dd].y=inputdata[i*32+dd].read_base[kk*4+1]; if(inputdata[i*32+dd].read_size<=kk*4+2) continue; else read_base_data[kk*32+dd].z=inputdata[i*32+dd].read_base[kk*4+2]; if(inputdata[i*32+dd].read_size<=kk*4+3) continue; else read_base_data[kk*32+dd].w=inputdata[i*32+dd].read_base[kk*4+3]; } } //finish read_base float parameter1[260*32];//Qm//128 do not change to 128 float parameter2[260*32];//QI//128 do not change to 128 float parameter3[260*32];//QD/128 do not change to 128 float parameter4[260*32];//alpha//128 do not change to 128 for(int kk=0;kk<long_read_size;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk) continue; else { parameter1[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].base_quals[kk]&127]; parameter2[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].ins_quals[kk]&127] ; parameter3[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].del_quals[kk]&127] ; parameter4[kk*32+dd]= 1.0f-ph2pr_h[((int)(inputdata[i*32+dd].ins_quals[kk]&127)+(int)( inputdata[i*32+dd].del_quals[kk]&127))&127]; // printf("kk=%d x=%d y=%d z=%d w=%d \n ",kk,parameter1[kk*32+dd],parameter2[kk*32+dd],parameter3[kk*32+dd],parameter4[kk*32+dd] ); } } } //to haplotype into 32 char4 int long_haplotype_size=0; //to find the longest hapltoype_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_haplotype_size<inputdata[i*32+j].haplotype_size) long_haplotype_size=inputdata[i*32+j].haplotype_size; } int haplotype_change_length=(long_haplotype_size+3)/4; char4 haplotype_base_data[32*125]; for(int kk=0;kk<haplotype_change_length;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].haplotype_size<=kk*4) continue; else haplotype_base_data[kk*32+dd].x=inputdata[i*32+dd].haplotype_base[kk*4]; if(inputdata[i*32+dd].haplotype_size<=kk*4+1) continue; else haplotype_base_data[kk*32+dd].y=inputdata[i*32+dd].haplotype_base[kk*4+1]; if(inputdata[i*32+dd].haplotype_size<=kk*4+2) continue; else haplotype_base_data[kk*32+dd].z=inputdata[i*32+dd].haplotype_base[kk*4+2]; if(inputdata[i*32+dd].haplotype_size<=kk*4+3) continue; else 
haplotype_base_data[kk*32+dd].w=inputdata[i*32+dd].haplotype_base[kk*4+3]; } } //put data address to each pair of read and haplotype. // read address memcpy(data_h,read_base_data,sizeof(char4)*32*change_length);//128 for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_haplotype.x=inputdata[i*32+kk].read_size; data_num_add[i*32+kk].read_haplotype.y=inputdata[i*32+kk].haplotype_size; data_num_add[i*32+kk].Read_array=data_size+sizeof(char4)*kk; // printf("set read size %d %d \n", data_num_add[i*32+kk].read_number,data_num_add[i*32+kk].haplotype_number); } data_h+=sizeof(char4)*32*change_length; data_size+=sizeof(char4)*32*change_length; //parameter address memcpy(data_h,parameter1,sizeof(float)*32*long_read_size); for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_large_length=long_read_size; } data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter2,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter3,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter4,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; //haplotype address memcpy(data_h,haplotype_base_data,sizeof(char4)*32*haplotype_change_length); data_h+=sizeof(char4)*32*haplotype_change_length; data_size+=sizeof(char4)*32*haplotype_change_length; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; char * data_d; NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); data_d=data_d_total+(sizeof(NUM_ADD)*size+127)/128*128; //printf("data_d_total %p num_add_d %p data_d %p \n",data_d_total, num_add_d,data_d); int blocksize=128; int gridsize=90; dim3 block(blocksize); dim3 grid(gridsize); // global memory to be used by GPU kernels. //float * MG; // float * DG; // float * IG; clock_gettime(CLOCK_MONOTONIC_RAW,&start); err=hipMemcpy(data_d_total,data_h_begin,data_size_to_copy,hipMemcpyHostToDevice); if(err!=hipSuccess) printf("Error %d: %s !\n", err, hipGetErrorString(err)); //float * MG; // float * DG; // float * IG; //hipMalloc( (float **)& MG,sizeof(float) *blocksize*gridsize*500*3); //DG=MG+blocksize*gridsize*500;// ???? //IG=DG+blocksize*gridsize*500; //????? hipLaunchKernelGGL(( pairHMM), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d, result_d,MG,DG,IG); hipMemcpy(result_h,result_d,size*sizeof(float),hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); for(int i=0;i<size;i++) float aa=(log10f((double)result_h[i]) - INI); // printf(" i=%d %e\n",i, result_h[i]); free(data_h_total); hipFree(result_d_total); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish_total); total_time+=diff(start_total,finish_total); free(inputdata); fscanf(file,"%d",&size); free(result_h); // printf("%d\n",size); // if(total>10000) // break; } // clock_gettime(CLOCK_MONOTONIC_RAW,&start); hipFree(MG); hipDeviceReset(); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // mem_cpy_time+=diff(start,finish);//(finish1.tv_nsec-start1.tv_nsec)/1000000000.0; printf("read_time=%e initial_time=%e computation_time= %e total_time=%e\n",read_time, data_prepare,computation_time, computation_time+mem_cpy_time); printf("Total time=%e\n",total_time); // printf("GCUPS: %lf \n", fakesize*read_read*haplotype_haplotype/computation_time/1000000000); return 0; }
93f17e0433af518d2f49b484de125480a4c7e81b.cu
#include <iostream> #include<limits> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include <algorithm> using namespace std; // 8 byte. how to be 128byte? // Parameter need to restruct. //2 bytes, 2 bytes, 4 bytes, 4 bytes, 4 bytes. struct NUM_ADD { short2 read_haplotype; int Read_array; int read_large_length; }; double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } __constant__ float constant[10]; __constant__ int constant_int[10]; __global__ void pairHMM( int size, char * data, NUM_ADD * num_add, float * result,float * MG,float * DG, float * IG ) // what is the maximum number of parameters? { //MG, DG and IG are global memory to store indermediate result? //each thread finish one computation int offset=blockIdx.x*blockDim.x+threadIdx.x; MG=MG+offset; IG=IG+offset; DG=DG+offset; //if(threadIdx.x==0) //printf("%d %d %d %d %d\n", constant_int[0],constant_int[1], constant_int[2],constant_int[3], constant_int[4]); while(offset<size) { __shared__ float parameter1[512]; __shared__ float parameter2[512]; __shared__ float parameter3[512]; __shared__ float parameter4[512]; //NUM_ADD number_address; //number_address=num_add[offset];//get from global memory short2 read_haplotype_number=num_add[offset].read_haplotype; int read_large_length=num_add[offset].read_large_length; //read_haplotype_number.x=number_address.read_number; char4 * read_base_array=(char4 *)(data+num_add[offset].Read_array); // to caculate the address of read_base_array. float *parameter1_array=(float *) (read_base_array+(read_large_length+3)/4*32); read_large_length=read_large_length*32; float *parameter2_array=(float *) (parameter1_array+read_large_length); float *parameter3_array=(float *) (parameter1_array+read_large_length*2); float *parameter4_array=(float *) (parameter1_array+read_large_length*3); //read_haplotype_number.y=number_address.haplotype_number; char4 * haplotype_base_array=(char4 * )(parameter1_array+read_large_length*4); //haplotype is 4 byte. Thus, in a warp it is 4*32=128 byte. 
//we need to change the struct of haplotype float result_block=constant[5]; char4 read_base_4; int i; // this for loop it increase register number 29 //if number_address.read_number is even for(i=0;i<read_haplotype_number.x/4;i++) { //got read_base from globle memory (which is 32*4 (char4) = 128 bytes ) read_base_4=read_base_array[i*constant_int[2]]; int skip=i*constant_int[1]; parameter1[threadIdx.x]=parameter1_array[skip]; parameter2[threadIdx.x]=parameter2_array[skip]; parameter3[threadIdx.x]=parameter3_array[skip]; parameter4[threadIdx.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*2]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*2]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*2]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*2]=parameter4_array[skip]; skip+=constant_int[2]; parameter1[threadIdx.x+blockDim.x*3]=parameter1_array[skip]; parameter2[threadIdx.x+blockDim.x*3]=parameter2_array[skip]; parameter3[threadIdx.x+blockDim.x*3]=parameter3_array[skip]; parameter4[threadIdx.x+blockDim.x*3]=parameter4_array[skip]; float Ml=constant[5];// left M; float Dl=constant[5];// left D; float Il=constant[5];// left I float M2=constant[5]; //left M2 float D2=constant[5]; //left D2 float M3=constant[5]; float D3=constant[5]; float M4=constant[5]; float D4=constant[5]; float MU=constant[5];// up M; float IU=constant[5];// up I; float DU=constant[5];// up D; float MMID=constant[5]; float MMID2=constant[5]; float MMID3=constant[5]; float MMID4=constant[5]; //epsion=constant[4]; // beta=constant[3]; int hh=(read_haplotype_number.y+3)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { float Qm,Qm_1,alpha,delta,xiksi; if(j*4+kk==read_haplotype_number.y) break; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } else { DU= constant[0] /(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } Qm=parameter1[threadIdx.x]; delta=parameter2[threadIdx.x]; xiksi=parameter3[threadIdx.x]; alpha=parameter4[threadIdx.x]; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); char4 read_haplotype_base; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; float aa=(read_haplotype_base.y==read_base_4.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Dl=__fmaf_rn(Dl,constant[4],DDM); Il=__fmaf_rn(MU,delta,IIMI); MMID=__fmaf_rn(alpha,MU,MIIDD); skip=threadIdx.x+blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; // epsion=0.1; // beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,Dl); DDM=__fmul_rn(M2,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4.y)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M2=__fmul_rn(aa,MMID2); D2=__fmaf_rn(D2,constant[4],DDM); Il=__fmaf_rn(Ml,delta,IIMI); MMID2=__fmaf_rn(alpha,Ml,MIIDD); skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; //epsion=0.1; //beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); MID=__fadd_rn(Il,D2); DDM=__fmul_rn(M3,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4.z)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M3=__fmul_rn(aa,MMID3); D3=__fmaf_rn(D3,constant[4],DDM); Il=__fmaf_rn(M2,delta,IIMI); MMID3=__fmaf_rn(alpha,M2,MIIDD); skip+=blockDim.x; Qm=parameter1[skip]; delta=parameter2[skip]; xiksi=parameter3[skip]; alpha=parameter4[skip]; // epsion=0.1; // beta=1.0-epsion; Qm_1=constant[1]-Qm; Qm=fdividef(Qm,constant[2]); //if(i==3) printf("M3=%e I3=%e D3=%e\n", M3,I3,D3); MID=__fadd_rn(Il,D3); DDM=__fmul_rn(M4,xiksi); IIMI=__fmul_rn(Il,constant[4]); aa=(read_haplotype_base.y==read_base_4.w)?Qm_1:Qm; MIIDD=__fmul_rn(constant[3], MID); M4=__fmul_rn(aa,MMID4); D4=__fmaf_rn(D4,constant[4],DDM); Il=__fmaf_rn(M3,delta,IIMI); MMID4=__fmaf_rn(alpha,M3,MIIDD); if(i==read_haplotype_number.x/4-1 && read_haplotype_number.x%4==0) { result_block=__fadd_rn(result_block,__fadd_rn(M4,Il)); } else { MG[index]=M4; IG[index]=Il; DG[index]=D4; } }//4 }//haplotype } //following is only 56 registers for(i=i*4;i<read_haplotype_number.x;i++) { //char4 read_base_4; if(i%4==0) { read_base_4=read_base_array[i/4*constant_int[2]]; } char4 read_haplotype_base; if(i%4==0) read_haplotype_base.x=read_base_4.x; if(i%4==1) read_haplotype_base.x=read_base_4.y; if(i%4==2) read_haplotype_base.x=read_base_4.z; if(i%4==3) read_haplotype_base.x=read_base_4.w; float Qm,Qm_1,alpha,delta,xiksi; delta=parameter2_array[i*constant_int[2]]; xiksi=parameter3_array[i*constant_int[2]]; alpha=parameter4_array[i*constant_int[2]]; Qm=parameter1_array[i*constant_int[2]]; Qm_1=constant[1]-Qm; //epsion=constant[4]; //beta=constant[3]; Qm=fdividef(Qm,constant[2]); float Ml=0;// left M; float Dl=0;// left D; float Il=0; float MU=0;// up M; float IU=0;// up I; float DU=0;// up D; float MMID=0; if(i==0) { DU=constant[0]/(float) read_haplotype_number.y; MMID=__fmul_rn(constant[3],DU); } int hh=(read_haplotype_number.y+4-1)/4; for(int j=0;j<hh;j++) { char4 haplotype_base; haplotype_base=haplotype_base_array[j*constant_int[2]]; for(int kk=0;kk<4;kk++) { if(j*4+kk==read_haplotype_number.y) break; if(kk==0) read_haplotype_base.y=haplotype_base.x; if(kk==1) read_haplotype_base.y=haplotype_base.y; if(kk==2) read_haplotype_base.y=haplotype_base.z; if(kk==3) read_haplotype_base.y=haplotype_base.w; int index=(j*4+kk)*blockDim.x*gridDim.x; if(i>0) { //here should not using offset. But using the //get MU,IU,DU from global memory MU=MG[index]; IU=IG[index]; DU=DG[index]; } float MID=__fadd_rn(IU,DU); float DDM=__fmul_rn(Ml,xiksi); float IIMI=__fmul_rn(IU,constant[4]); float aa=(read_haplotype_base.y==read_haplotype_base.x)? 
Qm_1:Qm; float MIIDD=__fmul_rn(constant[3],MID); Ml=__fmul_rn(aa,MMID); Il=__fmaf_rn(MU,delta,IIMI); Dl=__fmaf_rn(Dl,constant[4],DDM); MMID=__fmaf_rn(alpha,MU,MIIDD); if(i<read_haplotype_number.x-1) { MG[index]=Ml; IG[index]=Il; DG[index]=Dl; } else result_block=__fadd_rn(result_block,__fadd_rn(Ml,Il)); }//4 } //haplotype }//read result[offset]=result_block; offset+=gridDim.x*blockDim.x ; } } struct InputData { int read_size; char read_base[260]; char base_quals[260]; char ins_quals[260]; char del_quals[260]; char gcp_quals[260]; int haplotype_size; char haplotype_base[500]; }; bool operator<(const InputData &a, const InputData &b) { // return x.point_value > y.point_value; if(a.read_size<b.read_size) return true; if(a.read_size==b.read_size) return a.haplotype_size<b.haplotype_size; else return false; } int main(int argc, char * argv[]) { int INI=(log10f((std::numeric_limits<float>::max() / 16))); //printf("input value of size_each_for \n"); //scanf("%d", &size_each_for); struct timespec start,finish; double computation_time=0,mem_cpy_time=0,read_time=0, data_prepare=0; double total_time=0; FILE * file; float * MG; float * DG; float * IG; cudaMalloc( (float **)& MG,sizeof(float) *128*90*500*3); DG=MG+90*128*500;// ???? IG=DG+90*128*500; //????? //file=fopen("../a.txt","r"); file=fopen("/data/04068/sren/dir_chromosome-10/b.txt","r"); // file=fopen(argv[1],"r"); //file=fopen("32_data.txt","r"); // file=fopen("less.txt","r"); int size; fscanf(file,"%d",&size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); float ph2pr_h[128]; for(int i=0;i<128;i++) { ph2pr_h[i]=powf(10.f, -((float)i) / 10.f); } cudaError err; int constants_h_int[10]; float constants_h[10]; constants_h[0]=1.329228e+36; constants_h[1]=1.0; constants_h[2]=3.0; constants_h[3]=0.9; constants_h[4]=0.1; constants_h[5]=0.0; constants_h_int[0]=0; constants_h_int[1]=128; constants_h_int[2]=32; constants_h_int[3]=4; constants_h_int[4]=3; cudaMemcpyToSymbol(constant,constants_h,sizeof(float)*10 ); cudaMemcpyToSymbol(constant_int,constants_h_int,sizeof(int)*10 ); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); data_prepare+=diff(start,finish); int total=0; char * result_d_total; float read_read, haplotype_haplotype; while(!feof(file)) { total+=size; char useless; useless=fgetc(file); clock_gettime(CLOCK_MONOTONIC_RAW,&start); InputData *inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { int read_size; fscanf(file,"%d\n",&inputdata[i].read_size); fscanf(file,"%s ",inputdata[i].read_base); read_size=inputdata[i].read_size; read_read=read_size; for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i]. base_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].ins_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; fscanf(file,"%d ",&aa); inputdata[i].del_quals[j]=(char)aa; } for(int j=0;j<read_size;j++) { int aa; if(j<read_size-1) fscanf(file,"%d ",&aa); else fscanf(file,"%d \n",&aa); inputdata[i].gcp_quals[j]=(char)aa; } fscanf(file,"%d\n",&inputdata[i].haplotype_size); fscanf(file, "%s\n",inputdata[i].haplotype_base); haplotype_haplotype=inputdata[i].haplotype_size; } clock_gettime(CLOCK_MONOTONIC_RAW,&finish); read_time+=diff(start,finish); float * result_h=(float *) malloc(sizeof(float)*size); struct timespec start_total,finish_total; clock_gettime(CLOCK_MONOTONIC_RAW,&start_total); char * data_h_total; std::sort(inputdata, inputdata+size); //32 one chunck. 
int malloc_size_for_each_chunk=(65*4*32+260*4*32*4+125*4*32) ; int total_size=(size+31)/32*malloc_size_for_each_chunk+(size*sizeof(NUM_ADD)+127)/128*128; data_h_total=(char*)malloc(total_size); err=cudaMalloc( (char **) &result_d_total,total_size+size*sizeof(float)); if(err!=cudaSuccess) printf("Error %d:%s !\n", err, cudaGetErrorString(err)); char * data_d_total=result_d_total; float * result_d=(float *)(result_d_total+total_size);//last part is to store the result. char * data_h=data_h_total; char * data_h_begin=data_h; NUM_ADD *data_num_add=(NUM_ADD *) (data_h); data_h=data_h+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x .thus we donot need to worry about alignment. int data_size=0; //for each chunk int total_in_each=(size+31)/32; for(int i=0;i<total_in_each;i++) { //each is 32 //printf("total_in_each %d\n",total_in_each); //read_base int long_read_size=0; //to find the longest read_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_read_size<inputdata[i*32+j].read_size) long_read_size=inputdata[i*32+j].read_size; } int change_length=(long_read_size+3)/4;//because tile=4; each time deal with 4 read char4 read_base_data[32*65]; for(int kk=0;kk<change_length;kk++) { for(int dd=0;dd<32;dd++) // { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk*4) continue; else read_base_data[kk*32+dd].x=inputdata[i*32+dd].read_base[kk*4]; if(inputdata[i*32+dd].read_size<=kk*4+1) continue; else read_base_data[kk*32+dd].y=inputdata[i*32+dd].read_base[kk*4+1]; if(inputdata[i*32+dd].read_size<=kk*4+2) continue; else read_base_data[kk*32+dd].z=inputdata[i*32+dd].read_base[kk*4+2]; if(inputdata[i*32+dd].read_size<=kk*4+3) continue; else read_base_data[kk*32+dd].w=inputdata[i*32+dd].read_base[kk*4+3]; } } //finish read_base float parameter1[260*32];//Qm//128 do not change to 128 float parameter2[260*32];//QI//128 do not change to 128 float parameter3[260*32];//QD/128 do not change to 128 float parameter4[260*32];//alpha//128 do not change to 128 for(int kk=0;kk<long_read_size;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].read_size<=kk) continue; else { parameter1[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].base_quals[kk]&127]; parameter2[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].ins_quals[kk]&127] ; parameter3[kk*32+dd]= ph2pr_h[inputdata[i*32+dd].del_quals[kk]&127] ; parameter4[kk*32+dd]= 1.0f-ph2pr_h[((int)(inputdata[i*32+dd].ins_quals[kk]&127)+(int)( inputdata[i*32+dd].del_quals[kk]&127))&127]; // printf("kk=%d x=%d y=%d z=%d w=%d \n ",kk,parameter1[kk*32+dd],parameter2[kk*32+dd],parameter3[kk*32+dd],parameter4[kk*32+dd] ); } } } //to haplotype into 32 char4 int long_haplotype_size=0; //to find the longest hapltoype_size for(int j=0;j<32;j++) { if(i*32+j>=size) break; if(long_haplotype_size<inputdata[i*32+j].haplotype_size) long_haplotype_size=inputdata[i*32+j].haplotype_size; } int haplotype_change_length=(long_haplotype_size+3)/4; char4 haplotype_base_data[32*125]; for(int kk=0;kk<haplotype_change_length;kk++) { for(int dd=0;dd<32;dd++) { if(i*32+dd>=size) break; if(inputdata[i*32+dd].haplotype_size<=kk*4) continue; else haplotype_base_data[kk*32+dd].x=inputdata[i*32+dd].haplotype_base[kk*4]; if(inputdata[i*32+dd].haplotype_size<=kk*4+1) continue; else haplotype_base_data[kk*32+dd].y=inputdata[i*32+dd].haplotype_base[kk*4+1]; if(inputdata[i*32+dd].haplotype_size<=kk*4+2) continue; else haplotype_base_data[kk*32+dd].z=inputdata[i*32+dd].haplotype_base[kk*4+2]; if(inputdata[i*32+dd].haplotype_size<=kk*4+3) continue; else 
haplotype_base_data[kk*32+dd].w=inputdata[i*32+dd].haplotype_base[kk*4+3]; } } //put data address to each pair of read and haplotype. // read address memcpy(data_h,read_base_data,sizeof(char4)*32*change_length);//128 for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_haplotype.x=inputdata[i*32+kk].read_size; data_num_add[i*32+kk].read_haplotype.y=inputdata[i*32+kk].haplotype_size; data_num_add[i*32+kk].Read_array=data_size+sizeof(char4)*kk; // printf("set read size %d %d \n", data_num_add[i*32+kk].read_number,data_num_add[i*32+kk].haplotype_number); } data_h+=sizeof(char4)*32*change_length; data_size+=sizeof(char4)*32*change_length; //parameter address memcpy(data_h,parameter1,sizeof(float)*32*long_read_size); for(int kk=0;kk<32;kk++) { if(i*32+kk>=size) break; data_num_add[i*32+kk].read_large_length=long_read_size; } data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter2,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter3,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; memcpy(data_h,parameter4,sizeof(float)*32*long_read_size); data_h+=sizeof(float)*32*long_read_size; data_size+=sizeof(float)*32*long_read_size; //haplotype address memcpy(data_h,haplotype_base_data,sizeof(char4)*32*haplotype_change_length); data_h+=sizeof(char4)*32*haplotype_change_length; data_size+=sizeof(char4)*32*haplotype_change_length; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; char * data_d; NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); data_d=data_d_total+(sizeof(NUM_ADD)*size+127)/128*128; //printf("data_d_total %p num_add_d %p data_d %p \n",data_d_total, num_add_d,data_d); int blocksize=128; int gridsize=90; dim3 block(blocksize); dim3 grid(gridsize); // global memory to be used by GPU kernels. //float * MG; // float * DG; // float * IG; clock_gettime(CLOCK_MONOTONIC_RAW,&start); err=cudaMemcpy(data_d_total,data_h_begin,data_size_to_copy,cudaMemcpyHostToDevice); if(err!=cudaSuccess) printf("Error %d: %s !\n", err, cudaGetErrorString(err)); //float * MG; // float * DG; // float * IG; //cudaMalloc( (float **)& MG,sizeof(float) *blocksize*gridsize*500*3); //DG=MG+blocksize*gridsize*500;// ???? //IG=DG+blocksize*gridsize*500; //????? pairHMM<<<grid,block>>> (size,data_d,num_add_d, result_d,MG,DG,IG); cudaMemcpy(result_h,result_d,size*sizeof(float),cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); for(int i=0;i<size;i++) float aa=(log10f((double)result_h[i]) - INI); // printf(" i=%d %e\n",i, result_h[i]); free(data_h_total); cudaFree(result_d_total); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish_total); total_time+=diff(start_total,finish_total); free(inputdata); fscanf(file,"%d",&size); free(result_h); // printf("%d\n",size); // if(total>10000) // break; } // clock_gettime(CLOCK_MONOTONIC_RAW,&start); cudaFree(MG); cudaDeviceReset(); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // mem_cpy_time+=diff(start,finish);//(finish1.tv_nsec-start1.tv_nsec)/1000000000.0; printf("read_time=%e initial_time=%e computation_time= %e total_time=%e\n",read_time, data_prepare,computation_time, computation_time+mem_cpy_time); printf("Total time=%e\n",total_time); // printf("GCUPS: %lf \n", fakesize*read_read*haplotype_haplotype/computation_time/1000000000); return 0; }
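The pairHMM pair above shows the two mechanical rewrites hipify applies throughout these file pairs: CUDA runtime calls are renamed to their HIP counterparts, and triple-chevron kernel launches become hipLaunchKernelGGL calls whose two extra arguments are the dynamic shared-memory size and the stream. A minimal sketch of that translation in isolation, using a hypothetical scale() kernel that is not part of the files above:

__global__ void scale(float *x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= 2.0f;  // illustrative kernel body only
}
// CUDA launch, as written in the .cu files:
//   scale<<<grid, block>>>(d_x, n);
// Equivalent HIP launch emitted by hipify, as seen in the .hip files:
//   hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, n);  // 0, 0 = shared-memory bytes, stream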
526cfc043ae848c58738a2341f87f523c9818416.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include "cuda_utils.cuh" #include "linalg/reduce.cuh" #include "random/rng.cuh" #include "reduce_hip.cuh" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T> struct ReduceInputs { T tolerance; int rows, cols; bool rowMajor, alongRows; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const ReduceInputs<T> &dims) { return os; } // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename T> void reduceLaunch(T *dots, const T *data, int cols, int rows, bool rowMajor, bool alongRows, bool inplace, hipStream_t stream) { reduce(dots, data, cols, rows, (T)0, rowMajor, alongRows, stream, inplace, [] __device__(T in, int i) { return in * in; }); } template <typename T> class ReduceTest : public ::testing::TestWithParam<ReduceInputs<T>> { protected: void SetUp() override { CUDA_CHECK(hipStreamCreate(&stream)); params = ::testing::TestWithParam<ReduceInputs<T>>::GetParam(); Random::Rng r(params.seed); int rows = params.rows, cols = params.cols; int len = rows * cols; outlen = params.alongRows ? rows : cols; allocate(data, len); allocate(dots_exp, outlen); allocate(dots_act, outlen); r.uniform(data, len, T(-1.0), T(1.0), stream); naiveReduction(dots_exp, data, cols, rows, params.rowMajor, params.alongRows, stream); // Perform reduction with default inplace = false first reduceLaunch(dots_act, data, cols, rows, params.rowMajor, params.alongRows, false, stream); // Add to result with inplace = true next, which shouldn't affect // in the case of coalescedReduction! 
if (!(params.rowMajor ^ params.alongRows)) { reduceLaunch(dots_act, data, cols, rows, params.rowMajor, params.alongRows, true, stream); } } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(dots_exp)); CUDA_CHECK(hipFree(dots_act)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: ReduceInputs<T> params; T *data, *dots_exp, *dots_act; int outlen; hipStream_t stream; }; const std::vector<ReduceInputs<float>> inputsf = { {0.000002f, 1024, 32, true, true, 1234ULL}, {0.000002f, 1024, 64, true, true, 1234ULL}, {0.000002f, 1024, 128, true, true, 1234ULL}, {0.000002f, 1024, 256, true, true, 1234ULL}, {0.000002f, 1024, 32, true, false, 1234ULL}, {0.000002f, 1024, 64, true, false, 1234ULL}, {0.000002f, 1024, 128, true, false, 1234ULL}, {0.000002f, 1024, 256, true, false, 1234ULL}, {0.000002f, 1024, 32, false, true, 1234ULL}, {0.000002f, 1024, 64, false, true, 1234ULL}, {0.000002f, 1024, 128, false, true, 1234ULL}, {0.000002f, 1024, 256, false, true, 1234ULL}, {0.000002f, 1024, 32, false, false, 1234ULL}, {0.000002f, 1024, 64, false, false, 1234ULL}, {0.000002f, 1024, 128, false, false, 1234ULL}, {0.000002f, 1024, 256, false, false, 1234ULL}}; const std::vector<ReduceInputs<double>> inputsd = { {0.000000001, 1024, 32, true, true, 1234ULL}, {0.000000001, 1024, 64, true, true, 1234ULL}, {0.000000001, 1024, 128, true, true, 1234ULL}, {0.000000001, 1024, 256, true, true, 1234ULL}, {0.000000001, 1024, 32, true, false, 1234ULL}, {0.000000001, 1024, 64, true, false, 1234ULL}, {0.000000001, 1024, 128, true, false, 1234ULL}, {0.000000001, 1024, 256, true, false, 1234ULL}, {0.000000001, 1024, 32, false, true, 1234ULL}, {0.000000001, 1024, 64, false, true, 1234ULL}, {0.000000001, 1024, 128, false, true, 1234ULL}, {0.000000001, 1024, 256, false, true, 1234ULL}, {0.000000001, 1024, 32, false, false, 1234ULL}, {0.000000001, 1024, 64, false, false, 1234ULL}, {0.000000001, 1024, 128, false, false, 1234ULL}, {0.000000001, 1024, 256, false, false, 1234ULL}}; typedef ReduceTest<float> ReduceTestF; TEST_P(ReduceTestF, Result) { ASSERT_TRUE(devArrMatch(dots_exp, dots_act, outlen, CompareApprox<float>(params.tolerance))); } typedef ReduceTest<double> ReduceTestD; TEST_P(ReduceTestD, Result) { ASSERT_TRUE(devArrMatch(dots_exp, dots_act, outlen, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceTests, ReduceTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(ReduceTests, ReduceTestD, ::testing::ValuesIn(inputsd)); } // end namespace LinAlg } // end namespace MLCommon
526cfc043ae848c58738a2341f87f523c9818416.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include "cuda_utils.cuh" #include "linalg/reduce.cuh" #include "random/rng.cuh" #include "reduce.cuh" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T> struct ReduceInputs { T tolerance; int rows, cols; bool rowMajor, alongRows; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const ReduceInputs<T> &dims) { return os; } // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename T> void reduceLaunch(T *dots, const T *data, int cols, int rows, bool rowMajor, bool alongRows, bool inplace, cudaStream_t stream) { reduce(dots, data, cols, rows, (T)0, rowMajor, alongRows, stream, inplace, [] __device__(T in, int i) { return in * in; }); } template <typename T> class ReduceTest : public ::testing::TestWithParam<ReduceInputs<T>> { protected: void SetUp() override { CUDA_CHECK(cudaStreamCreate(&stream)); params = ::testing::TestWithParam<ReduceInputs<T>>::GetParam(); Random::Rng r(params.seed); int rows = params.rows, cols = params.cols; int len = rows * cols; outlen = params.alongRows ? rows : cols; allocate(data, len); allocate(dots_exp, outlen); allocate(dots_act, outlen); r.uniform(data, len, T(-1.0), T(1.0), stream); naiveReduction(dots_exp, data, cols, rows, params.rowMajor, params.alongRows, stream); // Perform reduction with default inplace = false first reduceLaunch(dots_act, data, cols, rows, params.rowMajor, params.alongRows, false, stream); // Add to result with inplace = true next, which shouldn't affect // in the case of coalescedReduction! 
if (!(params.rowMajor ^ params.alongRows)) { reduceLaunch(dots_act, data, cols, rows, params.rowMajor, params.alongRows, true, stream); } } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(dots_exp)); CUDA_CHECK(cudaFree(dots_act)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: ReduceInputs<T> params; T *data, *dots_exp, *dots_act; int outlen; cudaStream_t stream; }; const std::vector<ReduceInputs<float>> inputsf = { {0.000002f, 1024, 32, true, true, 1234ULL}, {0.000002f, 1024, 64, true, true, 1234ULL}, {0.000002f, 1024, 128, true, true, 1234ULL}, {0.000002f, 1024, 256, true, true, 1234ULL}, {0.000002f, 1024, 32, true, false, 1234ULL}, {0.000002f, 1024, 64, true, false, 1234ULL}, {0.000002f, 1024, 128, true, false, 1234ULL}, {0.000002f, 1024, 256, true, false, 1234ULL}, {0.000002f, 1024, 32, false, true, 1234ULL}, {0.000002f, 1024, 64, false, true, 1234ULL}, {0.000002f, 1024, 128, false, true, 1234ULL}, {0.000002f, 1024, 256, false, true, 1234ULL}, {0.000002f, 1024, 32, false, false, 1234ULL}, {0.000002f, 1024, 64, false, false, 1234ULL}, {0.000002f, 1024, 128, false, false, 1234ULL}, {0.000002f, 1024, 256, false, false, 1234ULL}}; const std::vector<ReduceInputs<double>> inputsd = { {0.000000001, 1024, 32, true, true, 1234ULL}, {0.000000001, 1024, 64, true, true, 1234ULL}, {0.000000001, 1024, 128, true, true, 1234ULL}, {0.000000001, 1024, 256, true, true, 1234ULL}, {0.000000001, 1024, 32, true, false, 1234ULL}, {0.000000001, 1024, 64, true, false, 1234ULL}, {0.000000001, 1024, 128, true, false, 1234ULL}, {0.000000001, 1024, 256, true, false, 1234ULL}, {0.000000001, 1024, 32, false, true, 1234ULL}, {0.000000001, 1024, 64, false, true, 1234ULL}, {0.000000001, 1024, 128, false, true, 1234ULL}, {0.000000001, 1024, 256, false, true, 1234ULL}, {0.000000001, 1024, 32, false, false, 1234ULL}, {0.000000001, 1024, 64, false, false, 1234ULL}, {0.000000001, 1024, 128, false, false, 1234ULL}, {0.000000001, 1024, 256, false, false, 1234ULL}}; typedef ReduceTest<float> ReduceTestF; TEST_P(ReduceTestF, Result) { ASSERT_TRUE(devArrMatch(dots_exp, dots_act, outlen, CompareApprox<float>(params.tolerance))); } typedef ReduceTest<double> ReduceTestD; TEST_P(ReduceTestD, Result) { ASSERT_TRUE(devArrMatch(dots_exp, dots_act, outlen, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceTests, ReduceTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(ReduceTests, ReduceTestD, ::testing::ValuesIn(inputsd)); } // end namespace LinAlg } // end namespace MLCommon
38fdf3792b069d36f608a745db9bf34e8b28b2b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include <hipcub/hipcub.hpp> #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace { template<typename T> class TmpBufferManager final { public: OF_DISALLOW_COPY_AND_MOVE(TmpBufferManager); TmpBufferManager(int32_t capacity, void* ptr, int32_t instance_num) : capacity_{capacity}, key_value_out_elem_cnt_{instance_num} { const int32_t key_value_out_aligned_bytes = GetCudaAlignedSize(key_value_out_elem_cnt_ * sizeof(hipcub::KeyValuePair<int32_t, T>)); key_value_out_ptr_ = reinterpret_cast<hipcub::KeyValuePair<int32_t, T>*>(ptr); temp_storage_ptr_ = reinterpret_cast<void*>(reinterpret_cast<char*>(key_value_out_ptr_) + key_value_out_aligned_bytes); temp_storage_bytes_ = capacity_ - key_value_out_aligned_bytes; CHECK_GE(temp_storage_bytes_, 0); } ~TmpBufferManager() = default; hipcub::KeyValuePair<int32_t, T>* KeyValueOutPtr() const { return key_value_out_ptr_; } void* TempStoragePtr() const { return temp_storage_ptr_; } int32_t TempStorageBytes() const { return temp_storage_bytes_; } private: int32_t capacity_; hipcub::KeyValuePair<int32_t, T>* key_value_out_ptr_; void* temp_storage_ptr_; int32_t key_value_out_elem_cnt_; int32_t temp_storage_bytes_; }; class MultiplyFunctor final { public: MultiplyFunctor(int32_t num_col) : num_col_(num_col) {} __host__ __device__ __forceinline__ int32_t operator()(int32_t idx) const { return idx * num_col_; } private: int32_t num_col_; }; template<typename T> size_t InferTempStorageForArgMax(int32_t num_row, int32_t num_col) { using SegmentOffsetIter = hipcub::TransformInputIterator<int32_t, MultiplyFunctor, hipcub::CountingInputIterator<int32_t>>; hipcub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); size_t temp_storage_bytes = 0; auto err = hipcub::DeviceSegmentedReduce::ArgMax<T*, hipcub::KeyValuePair<int32_t, T>*, SegmentOffsetIter>( /* d_temp_storage */ nullptr, /* temp_storage_bytes */ temp_storage_bytes, /* d_in */ nullptr, /* d_out */ nullptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ 0); OF_CUDA_CHECK(err); return temp_storage_bytes; } template<typename T> void ArgMax(const T* in_ptr, int32_t num_row, int32_t num_col, void* temp_storage_ptr, int32_t temp_storage_bytes, hipcub::KeyValuePair<int32_t, T>* out_ptr, hipStream_t stream) { size_t rt_inferred_temp_storage_bytes = InferTempStorageForArgMax<T>(num_row, num_col); CHECK_LE(rt_inferred_temp_storage_bytes, temp_storage_bytes); using SegmentOffsetIter = hipcub::TransformInputIterator<int32_t, MultiplyFunctor, hipcub::CountingInputIterator<int32_t>>; hipcub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter 
segment_offset_iter(counting_iter, multiply_functor); auto err = hipcub::DeviceSegmentedReduce::ArgMax( /* d_temp_storage */ temp_storage_ptr, /* temp_storage_bytes */ rt_inferred_temp_storage_bytes, /* d_in */ in_ptr, /* d_out */ out_ptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ stream); OF_CUDA_CHECK(err); } template<typename T> __global__ void WriteKeysToOutput(const int32_t instance_num, const hipcub::KeyValuePair<int32_t, T>* key_value_out_ptr, int64_t* out_ptr) { CUDA_1D_KERNEL_LOOP(i, instance_num) { out_ptr[i] = key_value_out_ptr[i].key; } } } // namespace template<typename T> class GpuArgMaxKernel final : public user_op::OpKernel { public: GpuArgMaxKernel() = default; ~GpuArgMaxKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const int32_t elem_cnt = in->shape_view().elem_cnt(); CHECK_GE(elem_cnt, 0); if (elem_cnt == 0) { return; } const int32_t instance_size = in->shape_view().At(in->shape_view().NumAxes() - 1); const int32_t instance_num = elem_cnt / instance_size; TmpBufferManager<T> buffer_manager(tmp_buffer->shape_view().elem_cnt(), tmp_buffer->mut_dptr<void>(), instance_num); ArgMax(in->dptr<T>(), instance_num, instance_size, buffer_manager.TempStoragePtr(), buffer_manager.TempStorageBytes(), buffer_manager.KeyValueOutPtr(), ctx->stream()->As<ep::CudaStream>()->cuda_stream()); hipLaunchKernelGGL(( WriteKeysToOutput<T>), dim3(BlocksNum4ThreadsNum(instance_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream(), instance_num, buffer_manager.KeyValueOutPtr(), out->mut_dptr<int64_t>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_CUDA_ARGMAX_KERNEL(dtype) \ REGISTER_USER_KERNEL("argmax") \ .SetCreateFn<GpuArgMaxKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const Shape& in_shape = ctx->InputShape("in", 0); \ const int32_t instance_size = in_shape.dim_vec().back(); \ const int32_t instance_num = in_shape.elem_cnt() / instance_size; \ \ /* Key-Value Out */ \ int32_t key_value_out_bytes = \ GetCudaAlignedSize(instance_num * sizeof(hipcub::KeyValuePair<int32_t, dtype>)); \ \ /* CUB Temp Storage */ \ size_t temp_storage_bytes = InferTempStorageForArgMax<dtype>(instance_num, instance_size); \ \ return key_value_out_bytes + temp_storage_bytes; \ }); REGISTER_CUDA_ARGMAX_KERNEL(bool) REGISTER_CUDA_ARGMAX_KERNEL(float) REGISTER_CUDA_ARGMAX_KERNEL(double) REGISTER_CUDA_ARGMAX_KERNEL(uint8_t) REGISTER_CUDA_ARGMAX_KERNEL(int8_t) REGISTER_CUDA_ARGMAX_KERNEL(int32_t) REGISTER_CUDA_ARGMAX_KERNEL(int64_t) REGISTER_CUDA_ARGMAX_KERNEL(half) } // namespace oneflow
38fdf3792b069d36f608a745db9bf34e8b28b2b7.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include <cub/cub.cuh> #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace { template<typename T> class TmpBufferManager final { public: OF_DISALLOW_COPY_AND_MOVE(TmpBufferManager); TmpBufferManager(int32_t capacity, void* ptr, int32_t instance_num) : capacity_{capacity}, key_value_out_elem_cnt_{instance_num} { const int32_t key_value_out_aligned_bytes = GetCudaAlignedSize(key_value_out_elem_cnt_ * sizeof(cub::KeyValuePair<int32_t, T>)); key_value_out_ptr_ = reinterpret_cast<cub::KeyValuePair<int32_t, T>*>(ptr); temp_storage_ptr_ = reinterpret_cast<void*>(reinterpret_cast<char*>(key_value_out_ptr_) + key_value_out_aligned_bytes); temp_storage_bytes_ = capacity_ - key_value_out_aligned_bytes; CHECK_GE(temp_storage_bytes_, 0); } ~TmpBufferManager() = default; cub::KeyValuePair<int32_t, T>* KeyValueOutPtr() const { return key_value_out_ptr_; } void* TempStoragePtr() const { return temp_storage_ptr_; } int32_t TempStorageBytes() const { return temp_storage_bytes_; } private: int32_t capacity_; cub::KeyValuePair<int32_t, T>* key_value_out_ptr_; void* temp_storage_ptr_; int32_t key_value_out_elem_cnt_; int32_t temp_storage_bytes_; }; class MultiplyFunctor final { public: MultiplyFunctor(int32_t num_col) : num_col_(num_col) {} __host__ __device__ __forceinline__ int32_t operator()(int32_t idx) const { return idx * num_col_; } private: int32_t num_col_; }; template<typename T> size_t InferTempStorageForArgMax(int32_t num_row, int32_t num_col) { using SegmentOffsetIter = cub::TransformInputIterator<int32_t, MultiplyFunctor, cub::CountingInputIterator<int32_t>>; cub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); size_t temp_storage_bytes = 0; auto err = cub::DeviceSegmentedReduce::ArgMax<T*, cub::KeyValuePair<int32_t, T>*, SegmentOffsetIter>( /* d_temp_storage */ nullptr, /* temp_storage_bytes */ temp_storage_bytes, /* d_in */ nullptr, /* d_out */ nullptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ 0); OF_CUDA_CHECK(err); return temp_storage_bytes; } template<typename T> void ArgMax(const T* in_ptr, int32_t num_row, int32_t num_col, void* temp_storage_ptr, int32_t temp_storage_bytes, cub::KeyValuePair<int32_t, T>* out_ptr, cudaStream_t stream) { size_t rt_inferred_temp_storage_bytes = InferTempStorageForArgMax<T>(num_row, num_col); CHECK_LE(rt_inferred_temp_storage_bytes, temp_storage_bytes); using SegmentOffsetIter = cub::TransformInputIterator<int32_t, MultiplyFunctor, cub::CountingInputIterator<int32_t>>; cub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); auto err = cub::DeviceSegmentedReduce::ArgMax( /* d_temp_storage */ temp_storage_ptr, /* 
temp_storage_bytes */ rt_inferred_temp_storage_bytes, /* d_in */ in_ptr, /* d_out */ out_ptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ stream); OF_CUDA_CHECK(err); } template<typename T> __global__ void WriteKeysToOutput(const int32_t instance_num, const cub::KeyValuePair<int32_t, T>* key_value_out_ptr, int64_t* out_ptr) { CUDA_1D_KERNEL_LOOP(i, instance_num) { out_ptr[i] = key_value_out_ptr[i].key; } } } // namespace template<typename T> class GpuArgMaxKernel final : public user_op::OpKernel { public: GpuArgMaxKernel() = default; ~GpuArgMaxKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const int32_t elem_cnt = in->shape_view().elem_cnt(); CHECK_GE(elem_cnt, 0); if (elem_cnt == 0) { return; } const int32_t instance_size = in->shape_view().At(in->shape_view().NumAxes() - 1); const int32_t instance_num = elem_cnt / instance_size; TmpBufferManager<T> buffer_manager(tmp_buffer->shape_view().elem_cnt(), tmp_buffer->mut_dptr<void>(), instance_num); ArgMax(in->dptr<T>(), instance_num, instance_size, buffer_manager.TempStoragePtr(), buffer_manager.TempStorageBytes(), buffer_manager.KeyValueOutPtr(), ctx->stream()->As<ep::CudaStream>()->cuda_stream()); WriteKeysToOutput<T><<<BlocksNum4ThreadsNum(instance_num), kCudaThreadsNumPerBlock, 0, ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>( instance_num, buffer_manager.KeyValueOutPtr(), out->mut_dptr<int64_t>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_CUDA_ARGMAX_KERNEL(dtype) \ REGISTER_USER_KERNEL("argmax") \ .SetCreateFn<GpuArgMaxKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const Shape& in_shape = ctx->InputShape("in", 0); \ const int32_t instance_size = in_shape.dim_vec().back(); \ const int32_t instance_num = in_shape.elem_cnt() / instance_size; \ \ /* Key-Value Out */ \ int32_t key_value_out_bytes = \ GetCudaAlignedSize(instance_num * sizeof(cub::KeyValuePair<int32_t, dtype>)); \ \ /* CUB Temp Storage */ \ size_t temp_storage_bytes = InferTempStorageForArgMax<dtype>(instance_num, instance_size); \ \ return key_value_out_bytes + temp_storage_bytes; \ }); REGISTER_CUDA_ARGMAX_KERNEL(bool) REGISTER_CUDA_ARGMAX_KERNEL(float) REGISTER_CUDA_ARGMAX_KERNEL(double) REGISTER_CUDA_ARGMAX_KERNEL(uint8_t) REGISTER_CUDA_ARGMAX_KERNEL(int8_t) REGISTER_CUDA_ARGMAX_KERNEL(int32_t) REGISTER_CUDA_ARGMAX_KERNEL(int64_t) REGISTER_CUDA_ARGMAX_KERNEL(half) } // namespace oneflow
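The argmax pair above relies on the standard CUB two-phase calling convention: a first ArgMax call with a null temp-storage pointer only reports the scratch size (this is what InferTempStorageForArgMax does), and the second call runs the segmented reduction with that buffer. A stripped-down sketch of the same pattern, assuming d_in, d_offsets (num_segments + 1 entries), num_segments and stream are already set up as in the file above, and using a plain offset pointer instead of the TransformInputIterator:

size_t temp_bytes = 0;
cub::KeyValuePair<int, float> *d_argmax;  // one (index, value) pair per segment
cudaMalloc(&d_argmax, num_segments * sizeof(cub::KeyValuePair<int, float>));
// Phase 1: query the required temporary storage; no reduction is performed.
cub::DeviceSegmentedReduce::ArgMax(nullptr, temp_bytes, d_in, d_argmax,
                                   num_segments, d_offsets, d_offsets + 1, stream);
void *d_temp = nullptr;
cudaMalloc(&d_temp, temp_bytes);
// Phase 2: run the segmented arg-max with the allocated scratch space.
cub::DeviceSegmentedReduce::ArgMax(d_temp, temp_bytes, d_in, d_argmax,
                                   num_segments, d_offsets, d_offsets + 1, stream);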
d22dedee565e7621b7fd9082e54c4cad43064f13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <hip/hip_runtime.h> extern "C" __global__ void dist2_1(float *in1, float *in2, float *out, int rows, int columns){ int column = threadIdx.x + blockIdx.x*blockDim.x; if (column < columns) { out[column] = sqrt((in1[column] - in2[0])*(in1[column] - in2[0]) + (in1[column + columns] - in2[1])*(in1[column + columns] - in2[1]) + (in1[column + 2 * columns] - in2[2])*(in1[column + 2 * columns] - in2[2])); } }
d22dedee565e7621b7fd9082e54c4cad43064f13.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <cuda.h> extern "C" __global__ void dist2_1(float *in1, float *in2, float *out, int rows, int columns){ int column = threadIdx.x + blockIdx.x*blockDim.x; if (column < columns) { out[column] = sqrt((in1[column] - in2[0])*(in1[column] - in2[0]) + (in1[column + columns] - in2[1])*(in1[column + columns] - in2[1]) + (in1[column + 2 * columns] - in2[2])*(in1[column + 2 * columns] - in2[2])); } }
c9ca963d58a0ada7ccabd5027fa4a1ec6a9646d9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 200
__global__ void reverse(int *a, int *b) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // valid because the launch below uses N blocks of 1 thread, so gridDim.x == N
  b[gridDim.x - idx - 1] = a[idx];
}
void random_ints(int *p, int n) {
  int i;
  for (i = 0; i < n; i++) {
    p[i] = rand() % 100;
  }
}
int main(void) {
  int *a, *b;          // host copies of a, b
  int *dev_a, *dev_b;  // device copies of a, b
  int size = N * sizeof(int);  // we need space for N integers
  int i;
  // allocate device copies of a, b
  hipMalloc((void **)&dev_a, size);
  hipMalloc((void **)&dev_b, size);
  a = (int *)malloc(size);
  b = (int *)malloc(size);
  random_ints(a, N);
  // copy inputs to device
  hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
  // launch the reverse() kernel with N blocks of one thread each
  hipLaunchKernelGGL(( reverse), dim3(N), dim3(1), 0, 0, dev_a, dev_b);
  // copy device result back to host copy of b
  hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost);
  int correct = 1;
  for (i = 0; i < N; i++) {
    if (b[i] != a[N - 1 - i]) {
      correct = 0;
      printf("Incorrect\n");
      break;
    }
  }
  if (correct) printf("Correct\n");
  free(a);
  free(b);
  hipFree(dev_a);
  hipFree(dev_b);
  return 0;
}
c9ca963d58a0ada7ccabd5027fa4a1ec6a9646d9.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 200
__global__ void reverse(int *a, int *b) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // valid because the launch below uses N blocks of 1 thread, so gridDim.x == N
  b[gridDim.x - idx - 1] = a[idx];
}
void random_ints(int *p, int n) {
  int i;
  for (i = 0; i < n; i++) {
    p[i] = rand() % 100;
  }
}
int main(void) {
  int *a, *b;          // host copies of a, b
  int *dev_a, *dev_b;  // device copies of a, b
  int size = N * sizeof(int);  // we need space for N integers
  int i;
  // allocate device copies of a, b
  cudaMalloc((void **)&dev_a, size);
  cudaMalloc((void **)&dev_b, size);
  a = (int *)malloc(size);
  b = (int *)malloc(size);
  random_ints(a, N);
  // copy inputs to device
  cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
  // launch the reverse() kernel with N blocks of one thread each
  reverse<<<N, 1>>>(dev_a, dev_b);
  // copy device result back to host copy of b
  cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
  int correct = 1;
  for (i = 0; i < N; i++) {
    if (b[i] != a[N - 1 - i]) {
      correct = 0;
      printf("Incorrect\n");
      break;
    }
  }
  if (correct) printf("Correct\n");
  free(a);
  free(b);
  cudaFree(dev_a);
  cudaFree(dev_b);
  return 0;
}
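Because the kernel above indexes the output with gridDim.x, it is only correct for the one-thread-per-block launch used in this pair. A launch-configuration-independent variant would pass the length explicitly; a sketch, not part of the original files:

__global__ void reverse_n(const int *a, int *b, int n) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) b[n - idx - 1] = a[idx];  // bounds check plus explicit length
}
// e.g. reverse_n<<<(N + 255) / 256, 256>>>(dev_a, dev_b, N);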
cbba3463ec59e16ee322a0d32b10279d0e082f16.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> extern "C" __global__ void kern(int *out) { out[0] = 1; }
cbba3463ec59e16ee322a0d32b10279d0e082f16.cu
#include <cuda.h> extern "C" __global__ void kern(int *out) { out[0] = 1; }
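The kern pair is the smallest entry in this group: only the header include changes between the CUDA and HIP versions. A hypothetical host driver (not part of either file) that would exercise the CUDA build looks like this; the HIP build would use hipMalloc, hipLaunchKernelGGL and hipMemcpy in the same places:

int *d_out, h_out = 0;
cudaMalloc((void **)&d_out, sizeof(int));
kern<<<1, 1>>>(d_out);  // single thread writes out[0] = 1
cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_out);
// h_out == 1 indicates the kernel ran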
93888832a99915dc908036752bc9dc6fa985f4be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "device_launch_parameters.h" #define imin(a, b) (a < b ? a : b) #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock); __global__ void dot(float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main(void) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float *)malloc(N * sizeof(float)); b = (float *)malloc(N * sizeof(float)); partial_c = (float *)malloc(blocksPerGrid * sizeof(float)); HANDLE_ERROR(hipMalloc((void **)&dev_a, N * sizeof(float))); HANDLE_ERROR(hipMalloc((void **)&dev_b, N * sizeof(float))); HANDLE_ERROR(hipMalloc((void **)&dev_partial_c, blocksPerGrid * sizeof(float))); for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(float), hipMemcpyHostToDevice)); dot << <blocksPerGrid, threadsPerBlock >> >(dev_a, dev_b, dev_partial_c); HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost)); c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares((float)(N - 1))); HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipFree(dev_b)); HANDLE_ERROR(hipFree(dev_partial_c)); free(a); free(b); free(partial_c); }
93888832a99915dc908036752bc9dc6fa985f4be.cu
#include "common.h" #include "device_launch_parameters.h" #define imin(a, b) (a < b ? a : b) #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock); __global__ void dot(float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main(void) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float *)malloc(N * sizeof(float)); b = (float *)malloc(N * sizeof(float)); partial_c = (float *)malloc(blocksPerGrid * sizeof(float)); HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(float))); HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(float))); HANDLE_ERROR(cudaMalloc((void **)&dev_partial_c, blocksPerGrid * sizeof(float))); for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice)); dot << <blocksPerGrid, threadsPerBlock >> >(dev_a, dev_b, dev_partial_c); HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost)); c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares((float)(N - 1))); HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaFree(dev_b)); HANDLE_ERROR(cudaFree(dev_partial_c)); free(a); free(b); free(partial_c); }
4400cb0be36466c31c9c6bfd2513b68be14885a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ // ----------------------------------------------------------------------- // Fast CUDA Radix Sort Implementation // // The parallel radix sort algorithm implemented by this code is described // in the following paper. // // Satish, N., Harris, M., and Garland, M. "Designing Efficient Sorting // Algorithms for Manycore GPUs". In Proceedings of IEEE International // Parallel & Distributed Processing Symposium 2009 (IPDPS 2009). // // ----------------------------------------------------------------------- #include "radixsort.h" #include "cudpp/cudpp.h" #include <algorithm> #include <stdio.h> enum kernelName { SORT_KERNEL_EMPTY, SORT_KERNEL_RADIX_SORT_BLOCKS, SORT_KERNEL_RADIX_SORT_BLOCKS_KEYSONLY, SORT_KERNEL_FIND_RADIX_OFFSETS, SORT_KERNEL_REORDER_DATA, SORT_KERNEL_REORDER_DATA_KEYSONLY, SORT_KERNEL_COUNT, }; bool bManualCoalesce = false; unsigned int numCTAs[SORT_KERNEL_COUNT] = { 0, 0, 0, 0, 0, 0 }; unsigned int numSMs = 0; unsigned int persistentCTAThreshold[2] = { 0, 0 }; unsigned int persistentCTAThresholdFullBlocks[2] = { 0, 0 }; // Replace <stl> library min/max equivalents with MIN/MAX #define MIN(a,b) (a < b ? a : b) #define MAX(a,b) (a > b ? a : b) extern "C" void initDeviceParameters() { int deviceID = -1; if (hipSuccess == hipGetDevice(&deviceID)) { hipDeviceProp_t devprop; hipGetDeviceProperties(&devprop, deviceID); // sm_12 and later devices don't need help with coalesce bManualCoalesce = (devprop.major < 2 && devprop.minor < 2); // Empirically we have found that for some (usually larger) sort // sizes it is better to use exactly as many "persistent" CTAs // as can fill the GPU, which loop over the "blocks" of work. For smaller // arrays it is better to use the typical CUDA approach of launching one CTA // per block of work. // 0-element of these two-element arrays is for key-value sorts // 1-element is for key-only sorts persistentCTAThreshold[0] = bManualCoalesce ? 16777216 : 524288; persistentCTAThresholdFullBlocks[0] = bManualCoalesce ? 2097152: 524288; persistentCTAThreshold[1] = bManualCoalesce ? 16777216 : 8388608; persistentCTAThresholdFullBlocks[1] = bManualCoalesce ? 2097152: 0; // Determine the maximum number of CTAs that can be run simultaneously for each kernel // This is equivalent to the calculation done in the CUDA Occupancy Calculator spreadsheet unsigned regAllocationUnit = (devprop.major < 2 && devprop.minor < 2) ? 256 : 512; // in registers unsigned smemAllocationUnit = 512; // in bytes unsigned maxThreadsPerSM = bManualCoalesce ? 
768 : 1024; // sm_12 GPUs increase threads/SM to 1024 // These values were obtained using --ptxas-options=-v // Note on compute version 1.1 and earlier (sm_11) GPUs the code // must be compiled with -maxrregcount=32 (that's a good idea in general) unsigned regsPerThread[SORT_KERNEL_COUNT] = {0, 32, 28, 8, 14, 13}; unsigned sharedMemBytes[SORT_KERNEL_COUNT] = {32, 4144, 4144, 3120, 4352, 2288}; numSMs = devprop.multiProcessorCount; for (int i = 0; i < SORT_KERNEL_COUNT; ++i) { size_t regsPerCTA = regsPerThread[i] * RadixSort::CTA_SIZE; regsPerCTA += (regsPerCTA % regAllocationUnit); // round up to nearest allocation unit regsPerCTA = MAX(regsPerCTA, regAllocationUnit); // ensure we round up very small amounts correctly size_t ctaLimitRegs = (unsigned)devprop.regsPerBlock / regsPerCTA; size_t smemPerCTA = MAX(smemAllocationUnit, sharedMemBytes[i] + (sharedMemBytes[i] % smemAllocationUnit)); size_t ctaLimitSMem = devprop.sharedMemPerBlock / smemPerCTA; size_t ctaLimitThreads = maxThreadsPerSM / RadixSort::CTA_SIZE; numCTAs[i] = numSMs * MIN(ctaLimitRegs, MIN(ctaLimitSMem, MIN(ctaLimitThreads, 8))); } } } // In emulationmode, we need __syncthreads() inside warp-synchronous code, // but we don't in code running on the GPU, so we define this macro to use // in the warp-scan portion of the radix sort (see CUDPP for information // on the warp scan algorithm. #ifdef __DEVICE_EMULATION__ #define __SYNC __syncthreads(); #else #define __SYNC #endif typedef unsigned int uint; extern "C" void checkCudaError(const char *msg) { #if defined(_DEBUG) || defined(DEBUG) hipError_t e = hipDeviceSynchronize(); if( e != hipSuccess ) { fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e)); exit(EXIT_FAILURE); } e = hipGetLastError(); if( e != hipSuccess ) { fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e)); exit(EXIT_FAILURE); } #endif } // ----------------------------------------------------------------------------------------------- // The floatFlip and floatUnflip functions below are based on code in the web article // "Radix Tricks" by Michael Herf (http://www.stereopsis.com/radix.html). They are used to convert // floating point values into sortable unsigned integers (and back). // // Paraphrasing Michael: Binary single-precision floating point numbers have two features that // keep them from being directly sortable. First, the sign bit is set when the value is negative, // which means that all negative numbers are bigger than positive ones. Second, the values are // signed-magnitude, so "more negative" floating point numbers actually look bigger to a normal // bitwise comparison. // // "To fix our floating point numbers, we define the following rules: // // 1. Always flip the sign bit. // 2. If the sign bit was set, flip the other bits too. // // To get back, we flip the sign bit always, and if the sign bit was not set, we flip the other // bits too." // // This is a very inexpensive operation and it is only done on the first and last steps of the // sort. // ----------------------------------------------------------------------------------------------- // ================================================================================================ // Flip a float for sorting // finds SIGN of fp number. 
// if it's 1 (negative float), it flips all bits // if it's 0 (positive float), it flips the sign only // ================================================================================================ template <bool doFlip> __device__ uint floatFlip(uint f) { if (doFlip) { uint mask = -int(f >> 31) | 0x80000000; return f ^ mask; } else return f; } // ================================================================================================ // flip a float back (invert FloatFlip) // signed was flipped from above, so: // if sign is 1 (negative), it flips the sign bit back // if sign is 0 (positive), it flips all bits back // ================================================================================================ template <bool doFlip> __device__ uint floatUnflip(uint f) { if (doFlip) { uint mask = ((f >> 31) - 1) | 0x80000000; return f ^ mask; } else return f; } // ================================================================================================ // Kernel to flip all floats in an array (see floatFlip, above) // Each thread flips four values (each 256-thread CTA flips 1024 values). // ================================================================================================ __global__ void flipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); } // ================================================================================================ // Kernel to unflip all floats in an array (see floatUnflip, above) // Each thread unflips four values (each 256-thread CTA unflips 1024 values). // ================================================================================================ __global__ void unflipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); } //---------------------------------------------------------------------------- // Scans each warp in parallel ("warp-scan"), one element per thread. 
// uses 2 numElements of shared memory per thread (64 = elements per warp) //---------------------------------------------------------------------------- template<class T, int maxlevel> __device__ T scanwarp(T val, T* sData) { // The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp = // 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1)) int idx = 2 * threadIdx.x - (threadIdx.x & (RadixSort::WARP_SIZE - 1)); sData[idx] = 0; idx += RadixSort::WARP_SIZE; sData[idx] = val; __SYNC #ifdef __DEVICE_EMULATION__ T t = sData[idx - 1]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 2]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 4]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 8]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 16]; __SYNC sData[idx] += t; __SYNC #else if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; } __SYNC if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; } __SYNC if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; } __SYNC if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; } __SYNC if (4 <= maxlevel) { sData[idx] += sData[idx -16]; } __SYNC #endif return sData[idx] - val; // convert inclusive -> exclusive } //---------------------------------------------------------------------------- // scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using // a warp-scan algorithm //---------------------------------------------------------------------------- __device__ uint4 scan4(uint4 idata) { extern __shared__ uint ptr[]; uint idx = threadIdx.x; uint4 val4 = idata; uint sum[3]; sum[0] = val4.x; sum[1] = val4.y + sum[0]; sum[2] = val4.z + sum[1]; uint val = val4.w + sum[2]; val = scanwarp<uint, 4>(val, ptr); __syncthreads(); if ((idx & (RadixSort::WARP_SIZE - 1)) == RadixSort::WARP_SIZE - 1) { ptr[idx >> 5] = val + val4.w + sum[2]; } __syncthreads(); #ifndef __DEVICE_EMULATION__ if (idx < RadixSort::WARP_SIZE) #endif { ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr); } __syncthreads(); val += ptr[idx >> 5]; val4.x = val; val4.y = val + sum[0]; val4.z = val + sum[1]; val4.w = val + sum[2]; return val4; } //---------------------------------------------------------------------------- // // Rank is the core of the radix sort loop. Given a predicate, it // computes the output position for each thread in an ordering where all // True threads come first, followed by all False threads. // // This version handles 4 predicates per thread; hence, "rank4". // //---------------------------------------------------------------------------- template <int ctasize> __device__ uint4 rank4(uint4 preds) { uint4 address = scan4(preds); __shared__ uint numtrue; if (threadIdx.x == ctasize-1) { numtrue = address.w + preds.w; } __syncthreads(); uint4 rank; uint idx = threadIdx.x << 2; rank.x = (preds.x) ? address.x : numtrue + idx - address.x; rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y; rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z; rank.w = (preds.w) ? 
address.w : numtrue + idx + 3 - address.w; return rank; } //---------------------------------------------------------------------------- // Uses rank to sort one bit at a time: Sorts a block according // to bits startbit -> nbits + startbit // // Each thread sorts 4 elements by nbits bits //---------------------------------------------------------------------------- template<uint nbits, uint startbit> __device__ void radixSortBlock(uint4 &key, uint4 &value) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<RadixSort::CTA_SIZE>(lsb); // This arithmetic strides the ranks across 4 CTA_SIZE regions sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE]; __syncthreads(); sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = value.x; sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = value.y; sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = value.z; sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = value.w; __syncthreads(); value.x = sMem1[threadIdx.x]; value.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE]; value.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE]; value.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE]; __syncthreads(); } } __global__ void emptyKernel() {} //---------------------------------------------------------------------------- // // radixSortBlocks sorts all blocks of data independently in shared // memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements // // The radix sort is done in two stages. This stage calls radixSortBlock on each // block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size (fullBlocks) // differently than arrays that are not. "flip" is used to only compile in the // float flip code when float keys are used. "loop" is used when persistent CTAs // are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. 
//---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut, uint4* keysIn, uint4* valuesIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key, value; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; uint *values1 = (uint*)valuesIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; value.x = (idx < numElements) ? values1[idx] : UINT_MAX; value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX; value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX; value.w = UINT_MAX; } } else { key = keysIn[i]; value = valuesIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlock<nbits, startbit>(key, value); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; uint *values1 = (uint*)valuesOut; keys1[idx] = key.x; values1[idx] = value.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; values1[idx + 1] = value.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; values1[idx + 2] = value.z; } } } } else { keysOut[i] = key; valuesOut[i] = value; } if (loop) blockId += gridDim.x; else break; } } //---------------------------------------------------------------------------- // Given an array with blocks sorted according to a 4-bit radix group, each // block counts the number of keys that fall into each radix in the group, and // finds the starting offset of each radix in the block. It then writes the radix // counts to the counters array, and the starting offsets to the blockOffsets array. // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size // (fullBlocks) differently than arrays that are not. "loop" is used when persistent // CTAs are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. 
// //---------------------------------------------------------------------------- template<uint startbit, bool fullBlocks, bool loop> __global__ void findRadixOffsets(uint2 *keys, uint *counters, uint *blockOffsets, uint numElements, uint totalBlocks) { extern __shared__ uint2 sMem2[]; uint2 *sRadix2 = (uint2*)sMem2; uint *sRadix1 = (uint*) sRadix2; uint *sStartPointers = (uint*)(sMem2 + RadixSort::CTA_SIZE); uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint2 radix2; uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && ((i + 1) << 1 ) > numElements ) { // handle uint1 rather than uint2 for non-full blocks uint *keys1 = (uint*)keys; uint j = i << 1; radix2.x = (j < numElements) ? keys1[j] : UINT_MAX; j++; radix2.y = (j < numElements) ? keys1[j] : UINT_MAX; } else { radix2 = keys[i]; } sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF; sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF; // Finds the position where the sRadix1 entries differ and stores start // index for each radix. if(threadIdx.x < 16) { sStartPointers[threadIdx.x] = 0; } __syncthreads(); if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x; } if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]) { sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE]] = threadIdx.x + RadixSort::CTA_SIZE; } __syncthreads(); if(threadIdx.x < 16) { blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x]; } __syncthreads(); // Compute the sizes of each block. if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x - 1]] = threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]]; } if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1] ) { sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]] = threadIdx.x + RadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]]; } if(threadIdx.x == RadixSort::CTA_SIZE - 1) { sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]] = 2 * RadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]]; } __syncthreads(); if(threadIdx.x < 16) { counters[threadIdx.x * totalBlocks + blockId] = sStartPointers[threadIdx.x]; } if (loop) blockId += gridDim.x; else break; } } //---------------------------------------------------------------------------- // reorderData shuffles data in the array globally after the radix offsets // have been found. On compute version 1.1 and earlier GPUs, this code depends // on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). // // On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures // that all writes are coalesced using extra work in the kernel. On later // GPUs coalescing rules have been relaxed, so this extra overhead hurts // performance. On these GPUs we set manualCoalesce=false and directly store // the results. // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size // (fullBlocks) differently than arrays that are not. "loop" is used when persistent // CTAs are used. 
// // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. //---------------------------------------------------------------------------- template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void reorderData(uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[RadixSort::CTA_SIZE]; __shared__ uint2 sValues2[RadixSort::CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint *sValues1 = (uint*)sValues2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint *values1 = (uint*)values; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; sValues2[threadIdx.x] = values[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); outValues[globalOffset] = sValues1[threadIdx.x]; } radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]); outValues[globalOffset] = sValues1[threadIdx.x + RadixSort::CTA_SIZE]; } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. // Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). 
const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); outValues[outOffset] = sValues1[inOffset]; } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } //---------------------------------------------------------------------------- // Perform one step of the radix sort. Sorts by nbits key bits per step, // starting at startbit. // // Uses cudppScan() for the prefix sum of radix counters. //---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStep(uint *keys, uint *values, uint *tempKeys, uint *tempValues, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements) { const uint eltsPerBlock = RadixSort::CTA_SIZE * 4; const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; bool loop2 = numBlocks2 > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop2 ? 65535 : numBlocks2; uint blocksReorder = loop2 ? 65535 : numBlocks2; uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[0] : persistentCTAThreshold[0]; if (numElements >= threshold) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); loop2 = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = loop ? numCTAs[SORT_KERNEL_RADIX_SORT_BLOCKS] : numBlocks; blocksFind = loop ? numCTAs[SORT_KERNEL_FIND_RADIX_OFFSETS] : numBlocks2; blocksReorder = loop ? 
numCTAs[SORT_KERNEL_REORDER_DATA] : numBlocks2; // Run an empty kernel -- this seems to reset some of the CTA scheduling hardware // on GT200, resulting in better scheduling and lower run times if (startbit > 0) hipLaunchKernelGGL(( emptyKernel), dim3(numCTAs[SORT_KERNEL_EMPTY]), dim3(RadixSort::CTA_SIZE), 0, 0, ); } if (fullBlocks) { if (loop) { hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, true>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, false>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } else { if (loop) { hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, true>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, false>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } if (fullBlocks) { if (loop2) hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } else { if (loop2) hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } cudppScan(scanPlan, countersSum, counters, 16*numBlocks2); if (fullBlocks) { if (bManualCoalesce) { if (loop2) hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } } else { if (bManualCoalesce) { if (loop2) hipLaunchKernelGGL(( reorderData<startbit, false, 
true, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } } checkCudaError("radixSortStep"); } //---------------------------------------------------------------------------- // Optimization for sorts of fewer than 4 * CTA_SIZE elements //---------------------------------------------------------------------------- template <bool flip> void radixSortSingleBlock(uint *keys, uint *values, uint numElements) { bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0); if (fullBlocks) { hipLaunchKernelGGL(( radixSortBlocks<32, 0, true, flip, false>) , dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 1 ); } else { hipLaunchKernelGGL(( radixSortBlocks<32, 0, false, flip, false>) , dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 1 ); } if (flip) hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(RadixSort::CTA_SIZE), 0, 0, keys, numElements); checkCudaError("radixSortSingleBlock"); } //---------------------------------------------------------------------------- // Optimization for sorts of WARP_SIZE or fewer elements //---------------------------------------------------------------------------- template <bool flip> __global__ void radixSortSingleWarp(uint *keys, uint *values, uint numElements) { volatile __shared__ uint sKeys[RadixSort::WARP_SIZE]; volatile __shared__ uint sValues[RadixSort::WARP_SIZE]; volatile __shared__ uint sFlags[RadixSort::WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); sValues[threadIdx.x] = values[threadIdx.x]; __SYNC // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; uint val_i = sValues[i]; sFlags[threadIdx.x] = 0; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { uint temp = sKeys[threadIdx.x]; uint tempval = sValues[threadIdx.x]; sFlags[threadIdx.x] = 1; sKeys[threadIdx.x + 1] = temp; sValues[threadIdx.x + 1] = tempval; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; sValues[threadIdx.x] = val_i; } __SYNC // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); values[threadIdx.x] = sValues[threadIdx.x]; } //---------------------------------------------------------------------------- // Main radix sort function. Sorts in place in the keys and values arrays, // but uses the other device arrays as temporary storage. All pointer // parameters are device pointers. 
//---------------------------------------------------------------------------- extern "C" void radixSort(uint *keys, uint *values, uint *tempKeys, uint *tempValues, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool flipBits = false) { if(numElements <= RadixSort::WARP_SIZE) { if (flipBits) hipLaunchKernelGGL(( radixSortSingleWarp<true>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements); else hipLaunchKernelGGL(( radixSortSingleWarp<false>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements); checkCudaError("radixSortSingleWarp"); return; } if(numElements <= RadixSort::CTA_SIZE * 4) { if (flipBits) radixSortSingleBlock<true>(keys, values, numElements); else radixSortSingleBlock<false>(keys, values, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 4) { radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 8) { radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 12) { radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 16) { radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 20) { radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 24) { radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } } checkCudaError("radixSort"); } extern "C" void radixSortFloatKeys(float *keys, uint *values, float *tempKeys, uint *tempValues, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool negativeKeys) { radixSort((uint*)keys, values, (uint*)tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements, keyBits, negativeKeys); checkCudaError("radixSortFloatKeys"); } //---------------------------------------------------------------------------- // Key-only Sorts //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- // Uses rank to sort one bit at a time: Sorts a block according // to bits startbit -> nbits + startbit //---------------------------------------------------------------------------- template<uint nbits, uint startbit> __device__ void radixSortBlockKeysOnly(uint4 &key) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 
0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<256>(lsb); #if 1 // This arithmetic strides the ranks across 4 CTA_SIZE regions sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE]; #else sMem1[r.x] = key.x; sMem1[r.y] = key.y; sMem1[r.z] = key.z; sMem1[r.w] = key.w; __syncthreads(); // This access has 4-way bank conflicts key = sMem[threadIdx.x]; #endif __syncthreads(); } } //---------------------------------------------------------------------------- // // radixSortBlocks sorts all blocks of data independently in shared // memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements // // The radix sort is done in two stages. This stage calls radixSortBlock on each // block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size (fullBlocks) // differently than arrays that are not. "flip" is used to only compile in the // float flip code when float keys are used. "loop" is used when persistent CTAs // are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. //---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? 
floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; } } else { key = keysIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlockKeysOnly<nbits, startbit>(key); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; keys1[idx] = key.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; } } } } else { keysOut[i] = key; } if (loop) blockId += gridDim.x; else break; } } //---------------------------------------------------------------------------- // reorderData shuffles data in the array globally after the radix offsets // have been found. On compute version 1.1 and earlier GPUs, this code depends // on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). // // On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures // that all writes are coalesced using extra work in the kernel. On later // GPUs coalescing rules have been relaxed, so this extra overhead hurts // performance. On these GPUs we set manualCoalesce=false and directly store // the results. // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size // (fullBlocks) differently than arrays that are not. "loop" is used when persistent // CTAs are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. //---------------------------------------------------------------------------- template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void reorderDataKeysOnly(uint *outKeys, uint2 *keys, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[RadixSort::CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? 
keys1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); } radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]); } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. // Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } //---------------------------------------------------------------------------- // Perform one step of the radix sort. Sorts by nbits key bits per step, // starting at startbit. //---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStepKeysOnly(uint *keys, uint *tempKeys, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements) { const uint eltsPerBlock = RadixSort::CTA_SIZE * 4; const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; bool loop2 = numBlocks2 > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop2 ? 65535 : numBlocks2; uint blocksReorder = loop2 ? 65535 : numBlocks2; uint threshold = fullBlocks ? 
persistentCTAThresholdFullBlocks[1] : persistentCTAThreshold[1]; if (numElements >= threshold) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); loop2 = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = loop ? numCTAs[SORT_KERNEL_RADIX_SORT_BLOCKS] : numBlocks; blocksFind = loop ? numCTAs[SORT_KERNEL_FIND_RADIX_OFFSETS] : numBlocks2; blocksReorder = loop ? numCTAs[SORT_KERNEL_REORDER_DATA] : numBlocks2; } if (fullBlocks) { if (loop) hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); else hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); } else { if (loop) hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); else hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>) , dim3(blocks), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); } if (fullBlocks) { if (loop2) hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } else { if (loop2) hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>) , dim3(blocksFind), dim3(RadixSort::CTA_SIZE), 3 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } cudppScan(scanPlan, countersSum, counters, 16*numBlocks2); if (fullBlocks) { if (bManualCoalesce) { if (loop2) hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } } else { if (bManualCoalesce) { if (loop2) hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, true>) , dim3(blocksReorder), 
dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, true>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, false>) , dim3(blocksReorder), dim3(RadixSort::CTA_SIZE), 0, 0, keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } } checkCudaError("radixSortStepKeysOnly"); } //---------------------------------------------------------------------------- // Optimization for sorts of fewer than 4 * CTA_SIZE elements //---------------------------------------------------------------------------- template <bool flip> void radixSortSingleBlockKeysOnly(uint *keys, uint numElements) { bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0); if (fullBlocks) { hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, true, flip, false>) , dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)keys, numElements, 1 ); } else { hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, false, flip, false>) , dim3(1), dim3(RadixSort::CTA_SIZE), 4 * RadixSort::CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)keys, numElements, 1 ); } if (flip) hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(RadixSort::CTA_SIZE), 0, 0, keys, numElements); checkCudaError("radixSortSingleBlock"); } //---------------------------------------------------------------------------- // Optimization for sorts of WARP_SIZE or fewer elements //---------------------------------------------------------------------------- template <bool flip> __global__ void radixSortSingleWarpKeysOnly(uint *keys, uint numElements) { __shared__ uint sKeys[RadixSort::WARP_SIZE]; __shared__ uint sFlags[RadixSort::WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); __SYNC // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; sFlags[threadIdx.x] = 0; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { uint temp = sKeys[threadIdx.x]; sFlags[threadIdx.x] = 1; sKeys[threadIdx.x + 1] = temp; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; } __SYNC // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); } //---------------------------------------------------------------------------- // Main key-only radix sort function. Sorts in place in the keys and values // arrays, but uses the other device arrays as temporary storage. All pointer // parameters are device pointers. Uses cudppScan() for the prefix sum of // radix counters. 
//---------------------------------------------------------------------------- extern "C" void radixSortKeysOnly(uint *keys, uint *tempKeys, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool flipBits = false) { if(numElements <= RadixSort::WARP_SIZE) { if (flipBits) hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<true>), dim3(1), dim3(numElements), 0, 0, keys, numElements); else hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<false>), dim3(1), dim3(numElements), 0, 0, keys, numElements); checkCudaError("radixSortSingleWarp"); return; } if(numElements <= RadixSort::CTA_SIZE * 4) { if (flipBits) radixSortSingleBlockKeysOnly<true>(keys, numElements); else radixSortSingleBlockKeysOnly<false>(keys, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 4) { radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 8) { radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 12) { radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 16) { radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 20) { radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 24) { radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } } checkCudaError("radixSortKeysOnly"); } //---------------------------------------------------------------------------- // Main float key-only radix sort function. Sorts in place in the keys and values // arrays, but uses the other device arrays as temporary storage. All pointer // parameters are device pointers. Uses cudppScan() for the prefix sum of // radix counters. //---------------------------------------------------------------------------- extern "C" void radixSortFloatKeysOnly(float *keys, float *tempKeys, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool negativeKeys) { radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements, keyBits, negativeKeys); checkCudaError("radixSortFloatKeys"); }
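The radix sort above relies on the floatFlip/floatUnflip bit trick (from Michael Herf's "Radix Tricks") to make IEEE floats sortable as unsigned integers. As an illustrative host-side check (not part of either file in this pair; `flip`, `unflip`, `bits`, and the test values are made up), the same two mappings can be verified on the CPU: `flip` produces unsigned integers whose unsigned order matches the usual float order (with -0.0f placed just before +0.0f), and `unflip` inverts the mapping exactly.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Same rules as the device code: always flip the sign bit; if the sign bit
// was set (negative float), flip all the other bits too.
static uint32_t flip(uint32_t f)   { uint32_t m = -(int32_t)(f >> 31) | 0x80000000u; return f ^ m; }
// Inverse: always flip the sign bit; if the sign bit was NOT set originally,
// flip all the other bits back.
static uint32_t unflip(uint32_t f) { uint32_t m = ((f >> 31) - 1u) | 0x80000000u; return f ^ m; }

static uint32_t bits(float x) { uint32_t u; std::memcpy(&u, &x, sizeof u); return u; }

int main() {
    float v[] = { -3.5f, -1.0f, 0.0f, 2.0f, 7.25f };   // already in float order
    for (int i = 0; i + 1 < 5; ++i) {
        assert(flip(bits(v[i])) < flip(bits(v[i + 1])));  // order is preserved
        assert(unflip(flip(bits(v[i]))) == bits(v[i]));   // mapping round-trips
    }
    return 0;
}
```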
4400cb0be36466c31c9c6bfd2513b68be14885a1.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ // ----------------------------------------------------------------------- // Fast CUDA Radix Sort Implementation // // The parallel radix sort algorithm implemented by this code is described // in the following paper. // // Satish, N., Harris, M., and Garland, M. "Designing Efficient Sorting // Algorithms for Manycore GPUs". In Proceedings of IEEE International // Parallel & Distributed Processing Symposium 2009 (IPDPS 2009). // // ----------------------------------------------------------------------- #include "radixsort.h" #include "cudpp/cudpp.h" #include <algorithm> #include <stdio.h> enum kernelName { SORT_KERNEL_EMPTY, SORT_KERNEL_RADIX_SORT_BLOCKS, SORT_KERNEL_RADIX_SORT_BLOCKS_KEYSONLY, SORT_KERNEL_FIND_RADIX_OFFSETS, SORT_KERNEL_REORDER_DATA, SORT_KERNEL_REORDER_DATA_KEYSONLY, SORT_KERNEL_COUNT, }; bool bManualCoalesce = false; unsigned int numCTAs[SORT_KERNEL_COUNT] = { 0, 0, 0, 0, 0, 0 }; unsigned int numSMs = 0; unsigned int persistentCTAThreshold[2] = { 0, 0 }; unsigned int persistentCTAThresholdFullBlocks[2] = { 0, 0 }; // Replace <stl> library min/max equivalents with MIN/MAX #define MIN(a,b) (a < b ? a : b) #define MAX(a,b) (a > b ? a : b) extern "C" void initDeviceParameters() { int deviceID = -1; if (cudaSuccess == cudaGetDevice(&deviceID)) { cudaDeviceProp devprop; cudaGetDeviceProperties(&devprop, deviceID); // sm_12 and later devices don't need help with coalesce bManualCoalesce = (devprop.major < 2 && devprop.minor < 2); // Empirically we have found that for some (usually larger) sort // sizes it is better to use exactly as many "persistent" CTAs // as can fill the GPU, which loop over the "blocks" of work. For smaller // arrays it is better to use the typical CUDA approach of launching one CTA // per block of work. // 0-element of these two-element arrays is for key-value sorts // 1-element is for key-only sorts persistentCTAThreshold[0] = bManualCoalesce ? 16777216 : 524288; persistentCTAThresholdFullBlocks[0] = bManualCoalesce ? 2097152: 524288; persistentCTAThreshold[1] = bManualCoalesce ? 16777216 : 8388608; persistentCTAThresholdFullBlocks[1] = bManualCoalesce ? 2097152: 0; // Determine the maximum number of CTAs that can be run simultaneously for each kernel // This is equivalent to the calculation done in the CUDA Occupancy Calculator spreadsheet unsigned regAllocationUnit = (devprop.major < 2 && devprop.minor < 2) ? 256 : 512; // in registers unsigned smemAllocationUnit = 512; // in bytes unsigned maxThreadsPerSM = bManualCoalesce ? 
768 : 1024; // sm_12 GPUs increase threads/SM to 1024 // These values were obtained using --ptxas-options=-v // Note on compute version 1.1 and earlier (sm_11) GPUs the code // must be compiled with -maxrregcount=32 (that's a good idea in general) unsigned regsPerThread[SORT_KERNEL_COUNT] = {0, 32, 28, 8, 14, 13}; unsigned sharedMemBytes[SORT_KERNEL_COUNT] = {32, 4144, 4144, 3120, 4352, 2288}; numSMs = devprop.multiProcessorCount; for (int i = 0; i < SORT_KERNEL_COUNT; ++i) { size_t regsPerCTA = regsPerThread[i] * RadixSort::CTA_SIZE; regsPerCTA += (regsPerCTA % regAllocationUnit); // round up to nearest allocation unit regsPerCTA = MAX(regsPerCTA, regAllocationUnit); // ensure we round up very small amounts correctly size_t ctaLimitRegs = (unsigned)devprop.regsPerBlock / regsPerCTA; size_t smemPerCTA = MAX(smemAllocationUnit, sharedMemBytes[i] + (sharedMemBytes[i] % smemAllocationUnit)); size_t ctaLimitSMem = devprop.sharedMemPerBlock / smemPerCTA; size_t ctaLimitThreads = maxThreadsPerSM / RadixSort::CTA_SIZE; numCTAs[i] = numSMs * MIN(ctaLimitRegs, MIN(ctaLimitSMem, MIN(ctaLimitThreads, 8))); } } } // In emulationmode, we need __syncthreads() inside warp-synchronous code, // but we don't in code running on the GPU, so we define this macro to use // in the warp-scan portion of the radix sort (see CUDPP for information // on the warp scan algorithm. #ifdef __DEVICE_EMULATION__ #define __SYNC __syncthreads(); #else #define __SYNC #endif typedef unsigned int uint; extern "C" void checkCudaError(const char *msg) { #if defined(_DEBUG) || defined(DEBUG) cudaError_t e = cudaThreadSynchronize(); if( e != cudaSuccess ) { fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e)); exit(EXIT_FAILURE); } e = cudaGetLastError(); if( e != cudaSuccess ) { fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e)); exit(EXIT_FAILURE); } #endif } // ----------------------------------------------------------------------------------------------- // The floatFlip and floatUnflip functions below are based on code in the web article // "Radix Tricks" by Michael Herf (http://www.stereopsis.com/radix.html). They are used to convert // floating point values into sortable unsigned integers (and back). // // Paraphrasing Michael: Binary single-precision floating point numbers have two features that // keep them from being directly sortable. First, the sign bit is set when the value is negative, // which means that all negative numbers are bigger than positive ones. Second, the values are // signed-magnitude, so "more negative" floating point numbers actually look bigger to a normal // bitwise comparison. // // "To fix our floating point numbers, we define the following rules: // // 1. Always flip the sign bit. // 2. If the sign bit was set, flip the other bits too. // // To get back, we flip the sign bit always, and if the sign bit was not set, we flip the other // bits too." // // This is a very inexpensive operation and it is only done on the first and last steps of the // sort. // ----------------------------------------------------------------------------------------------- // ================================================================================================ // Flip a float for sorting // finds SIGN of fp number. 
// if it's 1 (negative float), it flips all bits // if it's 0 (positive float), it flips the sign only // ================================================================================================ template <bool doFlip> __device__ uint floatFlip(uint f) { if (doFlip) { uint mask = -int(f >> 31) | 0x80000000; return f ^ mask; } else return f; } // ================================================================================================ // flip a float back (invert FloatFlip) // signed was flipped from above, so: // if sign is 1 (negative), it flips the sign bit back // if sign is 0 (positive), it flips all bits back // ================================================================================================ template <bool doFlip> __device__ uint floatUnflip(uint f) { if (doFlip) { uint mask = ((f >> 31) - 1) | 0x80000000; return f ^ mask; } else return f; } // ================================================================================================ // Kernel to flip all floats in an array (see floatFlip, above) // Each thread flips four values (each 256-thread CTA flips 1024 values). // ================================================================================================ __global__ void flipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); } // ================================================================================================ // Kernel to unflip all floats in an array (see floatUnflip, above) // Each thread unflips four values (each 256-thread CTA unflips 1024 values). // ================================================================================================ __global__ void unflipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); } //---------------------------------------------------------------------------- // Scans each warp in parallel ("warp-scan"), one element per thread. 
// uses 2 numElements of shared memory per thread (64 = elements per warp) //---------------------------------------------------------------------------- template<class T, int maxlevel> __device__ T scanwarp(T val, T* sData) { // The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp = // 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1)) int idx = 2 * threadIdx.x - (threadIdx.x & (RadixSort::WARP_SIZE - 1)); sData[idx] = 0; idx += RadixSort::WARP_SIZE; sData[idx] = val; __SYNC #ifdef __DEVICE_EMULATION__ T t = sData[idx - 1]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 2]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 4]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 8]; __SYNC sData[idx] += t; __SYNC t = sData[idx - 16]; __SYNC sData[idx] += t; __SYNC #else if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; } __SYNC if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; } __SYNC if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; } __SYNC if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; } __SYNC if (4 <= maxlevel) { sData[idx] += sData[idx -16]; } __SYNC #endif return sData[idx] - val; // convert inclusive -> exclusive } //---------------------------------------------------------------------------- // scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using // a warp-scan algorithm //---------------------------------------------------------------------------- __device__ uint4 scan4(uint4 idata) { extern __shared__ uint ptr[]; uint idx = threadIdx.x; uint4 val4 = idata; uint sum[3]; sum[0] = val4.x; sum[1] = val4.y + sum[0]; sum[2] = val4.z + sum[1]; uint val = val4.w + sum[2]; val = scanwarp<uint, 4>(val, ptr); __syncthreads(); if ((idx & (RadixSort::WARP_SIZE - 1)) == RadixSort::WARP_SIZE - 1) { ptr[idx >> 5] = val + val4.w + sum[2]; } __syncthreads(); #ifndef __DEVICE_EMULATION__ if (idx < RadixSort::WARP_SIZE) #endif { ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr); } __syncthreads(); val += ptr[idx >> 5]; val4.x = val; val4.y = val + sum[0]; val4.z = val + sum[1]; val4.w = val + sum[2]; return val4; } //---------------------------------------------------------------------------- // // Rank is the core of the radix sort loop. Given a predicate, it // computes the output position for each thread in an ordering where all // True threads come first, followed by all False threads. // // This version handles 4 predicates per thread; hence, "rank4". // //---------------------------------------------------------------------------- template <int ctasize> __device__ uint4 rank4(uint4 preds) { uint4 address = scan4(preds); __shared__ uint numtrue; if (threadIdx.x == ctasize-1) { numtrue = address.w + preds.w; } __syncthreads(); uint4 rank; uint idx = threadIdx.x << 2; rank.x = (preds.x) ? address.x : numtrue + idx - address.x; rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y; rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z; rank.w = (preds.w) ? 
address.w : numtrue + idx + 3 - address.w; return rank; } //---------------------------------------------------------------------------- // Uses rank to sort one bit at a time: Sorts a block according // to bits startbit -> nbits + startbit // // Each thread sorts 4 elements by nbits bits //---------------------------------------------------------------------------- template<uint nbits, uint startbit> __device__ void radixSortBlock(uint4 &key, uint4 &value) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 0x1); uint4 r = rank4<RadixSort::CTA_SIZE>(lsb); // This arithmetic strides the ranks across 4 CTA_SIZE regions sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE]; __syncthreads(); sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = value.x; sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = value.y; sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = value.z; sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = value.w; __syncthreads(); value.x = sMem1[threadIdx.x]; value.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE]; value.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE]; value.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE]; __syncthreads(); } } __global__ void emptyKernel() {} //---------------------------------------------------------------------------- // // radixSortBlocks sorts all blocks of data independently in shared // memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements // // The radix sort is done in two stages. This stage calls radixSortBlock on each // block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size (fullBlocks) // differently than arrays that are not. "flip" is used to only compile in the // float flip code when float keys are used. "loop" is used when persistent CTAs // are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. 
//---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut, uint4* keysIn, uint4* valuesIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key, value; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; uint *values1 = (uint*)valuesIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; value.x = (idx < numElements) ? values1[idx] : UINT_MAX; value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX; value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX; value.w = UINT_MAX; } } else { key = keysIn[i]; value = valuesIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlock<nbits, startbit>(key, value); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; uint *values1 = (uint*)valuesOut; keys1[idx] = key.x; values1[idx] = value.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; values1[idx + 1] = value.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; values1[idx + 2] = value.z; } } } } else { keysOut[i] = key; valuesOut[i] = value; } if (loop) blockId += gridDim.x; else break; } } //---------------------------------------------------------------------------- // Given an array with blocks sorted according to a 4-bit radix group, each // block counts the number of keys that fall into each radix in the group, and // finds the starting offset of each radix in the block. It then writes the radix // counts to the counters array, and the starting offsets to the blockOffsets array. // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size // (fullBlocks) differently than arrays that are not. "loop" is used when persistent // CTAs are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. 
// //---------------------------------------------------------------------------- template<uint startbit, bool fullBlocks, bool loop> __global__ void findRadixOffsets(uint2 *keys, uint *counters, uint *blockOffsets, uint numElements, uint totalBlocks) { extern __shared__ uint2 sMem2[]; uint2 *sRadix2 = (uint2*)sMem2; uint *sRadix1 = (uint*) sRadix2; uint *sStartPointers = (uint*)(sMem2 + RadixSort::CTA_SIZE); uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint2 radix2; uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && ((i + 1) << 1 ) > numElements ) { // handle uint1 rather than uint2 for non-full blocks uint *keys1 = (uint*)keys; uint j = i << 1; radix2.x = (j < numElements) ? keys1[j] : UINT_MAX; j++; radix2.y = (j < numElements) ? keys1[j] : UINT_MAX; } else { radix2 = keys[i]; } sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF; sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF; // Finds the position where the sRadix1 entries differ and stores start // index for each radix. if(threadIdx.x < 16) { sStartPointers[threadIdx.x] = 0; } __syncthreads(); if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x; } if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]) { sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE]] = threadIdx.x + RadixSort::CTA_SIZE; } __syncthreads(); if(threadIdx.x < 16) { blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x]; } __syncthreads(); // Compute the sizes of each block. if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x - 1]] = threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]]; } if(sRadix1[threadIdx.x + RadixSort::CTA_SIZE] != sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1] ) { sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]] = threadIdx.x + RadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + RadixSort::CTA_SIZE - 1]]; } if(threadIdx.x == RadixSort::CTA_SIZE - 1) { sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]] = 2 * RadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * RadixSort::CTA_SIZE - 1]]; } __syncthreads(); if(threadIdx.x < 16) { counters[threadIdx.x * totalBlocks + blockId] = sStartPointers[threadIdx.x]; } if (loop) blockId += gridDim.x; else break; } } //---------------------------------------------------------------------------- // reorderData shuffles data in the array globally after the radix offsets // have been found. On compute version 1.1 and earlier GPUs, this code depends // on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). // // On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures // that all writes are coalesced using extra work in the kernel. On later // GPUs coalescing rules have been relaxed, so this extra overhead hurts // performance. On these GPUs we set manualCoalesce=false and directly store // the results. // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size // (fullBlocks) differently than arrays that are not. "loop" is used when persistent // CTAs are used. 
// // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. //---------------------------------------------------------------------------- template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void reorderData(uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[RadixSort::CTA_SIZE]; __shared__ uint2 sValues2[RadixSort::CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint *sValues1 = (uint*)sValues2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint *values1 = (uint*)values; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; sValues2[threadIdx.x] = values[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); outValues[globalOffset] = sValues1[threadIdx.x]; } radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]); outValues[globalOffset] = sValues1[threadIdx.x + RadixSort::CTA_SIZE]; } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. // Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). 
const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); outValues[outOffset] = sValues1[inOffset]; } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } //---------------------------------------------------------------------------- // Perform one step of the radix sort. Sorts by nbits key bits per step, // starting at startbit. // // Uses cudppScan() for the prefix sum of radix counters. //---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStep(uint *keys, uint *values, uint *tempKeys, uint *tempValues, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements) { const uint eltsPerBlock = RadixSort::CTA_SIZE * 4; const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; bool loop2 = numBlocks2 > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop2 ? 65535 : numBlocks2; uint blocksReorder = loop2 ? 65535 : numBlocks2; uint threshold = fullBlocks ? persistentCTAThresholdFullBlocks[0] : persistentCTAThreshold[0]; if (numElements >= threshold) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); loop2 = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = loop ? numCTAs[SORT_KERNEL_RADIX_SORT_BLOCKS] : numBlocks; blocksFind = loop ? numCTAs[SORT_KERNEL_FIND_RADIX_OFFSETS] : numBlocks2; blocksReorder = loop ? 
numCTAs[SORT_KERNEL_REORDER_DATA] : numBlocks2; // Run an empty kernel -- this seems to reset some of the CTA scheduling hardware // on GT200, resulting in better scheduling and lower run times if (startbit > 0) emptyKernel<<<numCTAs[SORT_KERNEL_EMPTY], RadixSort::CTA_SIZE>>>(); } if (fullBlocks) { if (loop) { radixSortBlocks<nbits, startbit, true, flip, true> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { radixSortBlocks<nbits, startbit, true, flip, false> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } else { if (loop) { radixSortBlocks<nbits, startbit, false, flip, true> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { radixSortBlocks<nbits, startbit, false, flip, false> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } if (fullBlocks) { if (loop2) findRadixOffsets<startbit, true, true> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else findRadixOffsets<startbit, true, false> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } else { if (loop2) findRadixOffsets<startbit, false, true> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else findRadixOffsets<startbit, false, false> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } cudppScan(scanPlan, countersSum, counters, 16*numBlocks2); if (fullBlocks) { if (bManualCoalesce) { if (loop2) reorderData<startbit, true, true, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderData<startbit, true, true, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) reorderData<startbit, true, false, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderData<startbit, true, false, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } } else { if (bManualCoalesce) { if (loop2) reorderData<startbit, false, true, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderData<startbit, false, true, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) reorderData<startbit, false, false, unflip, true> 
<<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderData<startbit, false, false, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, values, (uint2*)tempKeys, (uint2*)tempValues, blockOffsets, countersSum, counters, numElements, numBlocks2); } } checkCudaError("radixSortStep"); } //---------------------------------------------------------------------------- // Optimization for sorts of fewer than 4 * CTA_SIZE elements //---------------------------------------------------------------------------- template <bool flip> void radixSortSingleBlock(uint *keys, uint *values, uint numElements) { bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0); if (fullBlocks) { radixSortBlocks<32, 0, true, flip, false> <<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 1 ); } else { radixSortBlocks<32, 0, false, flip, false> <<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 1 ); } if (flip) unflipFloats<<<1, RadixSort::CTA_SIZE>>>(keys, numElements); checkCudaError("radixSortSingleBlock"); } //---------------------------------------------------------------------------- // Optimization for sorts of WARP_SIZE or fewer elements //---------------------------------------------------------------------------- template <bool flip> __global__ void radixSortSingleWarp(uint *keys, uint *values, uint numElements) { volatile __shared__ uint sKeys[RadixSort::WARP_SIZE]; volatile __shared__ uint sValues[RadixSort::WARP_SIZE]; volatile __shared__ uint sFlags[RadixSort::WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); sValues[threadIdx.x] = values[threadIdx.x]; __SYNC // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; uint val_i = sValues[i]; sFlags[threadIdx.x] = 0; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { uint temp = sKeys[threadIdx.x]; uint tempval = sValues[threadIdx.x]; sFlags[threadIdx.x] = 1; sKeys[threadIdx.x + 1] = temp; sValues[threadIdx.x + 1] = tempval; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; sValues[threadIdx.x] = val_i; } __SYNC // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); values[threadIdx.x] = sValues[threadIdx.x]; } //---------------------------------------------------------------------------- // Main radix sort function. Sorts in place in the keys and values arrays, // but uses the other device arrays as temporary storage. All pointer // parameters are device pointers. 
//---------------------------------------------------------------------------- extern "C" void radixSort(uint *keys, uint *values, uint *tempKeys, uint *tempValues, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool flipBits = false) { if(numElements <= RadixSort::WARP_SIZE) { if (flipBits) radixSortSingleWarp<true><<<1, numElements>>>(keys, values, numElements); else radixSortSingleWarp<false><<<1, numElements>>>(keys, values, numElements); checkCudaError("radixSortSingleWarp"); return; } if(numElements <= RadixSort::CTA_SIZE * 4) { if (flipBits) radixSortSingleBlock<true>(keys, values, numElements); else radixSortSingleBlock<false>(keys, values, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 4) { radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 8) { radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 12) { radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 16) { radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 20) { radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 24) { radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements); } } checkCudaError("radixSort"); } extern "C" void radixSortFloatKeys(float *keys, uint *values, float *tempKeys, uint *tempValues, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool negativeKeys) { radixSort((uint*)keys, values, (uint*)tempKeys, tempValues, counters, countersSum, blockOffsets, scanPlan, numElements, keyBits, negativeKeys); checkCudaError("radixSortFloatKeys"); } //---------------------------------------------------------------------------- // Key-only Sorts //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- // Uses rank to sort one bit at a time: Sorts a block according // to bits startbit -> nbits + startbit //---------------------------------------------------------------------------- template<uint nbits, uint startbit> __device__ void radixSortBlockKeysOnly(uint4 &key) { extern __shared__ uint sMem1[]; for(uint shift = startbit; shift < (startbit + nbits); ++shift) { uint4 lsb; lsb.x = !((key.x >> shift) & 0x1); lsb.y = !((key.y >> shift) & 0x1); lsb.z = !((key.z >> shift) & 0x1); lsb.w = !((key.w >> shift) & 
0x1); uint4 r = rank4<256>(lsb); #if 1 // This arithmetic strides the ranks across 4 CTA_SIZE regions sMem1[(r.x & 3) * RadixSort::CTA_SIZE + (r.x >> 2)] = key.x; sMem1[(r.y & 3) * RadixSort::CTA_SIZE + (r.y >> 2)] = key.y; sMem1[(r.z & 3) * RadixSort::CTA_SIZE + (r.z >> 2)] = key.z; sMem1[(r.w & 3) * RadixSort::CTA_SIZE + (r.w >> 2)] = key.w; __syncthreads(); // The above allows us to read without 4-way bank conflicts: key.x = sMem1[threadIdx.x]; key.y = sMem1[threadIdx.x + RadixSort::CTA_SIZE]; key.z = sMem1[threadIdx.x + 2 * RadixSort::CTA_SIZE]; key.w = sMem1[threadIdx.x + 3 * RadixSort::CTA_SIZE]; #else sMem1[r.x] = key.x; sMem1[r.y] = key.y; sMem1[r.z] = key.z; sMem1[r.w] = key.w; __syncthreads(); // This access has 4-way bank conflicts key = sMem[threadIdx.x]; #endif __syncthreads(); } } //---------------------------------------------------------------------------- // // radixSortBlocks sorts all blocks of data independently in shared // memory. Each thread block (CTA) sorts one block of 4*CTA_SIZE elements // // The radix sort is done in two stages. This stage calls radixSortBlock on each // block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size (fullBlocks) // differently than arrays that are not. "flip" is used to only compile in the // float flip code when float keys are used. "loop" is used when persistent CTAs // are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. //---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? 
floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; } } else { key = keysIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlockKeysOnly<nbits, startbit>(key); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; keys1[idx] = key.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; } } } } else { keysOut[i] = key; } if (loop) blockId += gridDim.x; else break; } } //---------------------------------------------------------------------------- // reorderData shuffles data in the array globally after the radix offsets // have been found. On compute version 1.1 and earlier GPUs, this code depends // on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). // // On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures // that all writes are coalesced using extra work in the kernel. On later // GPUs coalescing rules have been relaxed, so this extra overhead hurts // performance. On these GPUs we set manualCoalesce=false and directly store // the results. // // Template parameters are used to generate efficient code for various special cases // For example, we have to handle arrays that are a multiple of the block size // (fullBlocks) differently than arrays that are not. "loop" is used when persistent // CTAs are used. // // By persistent CTAs we mean that we launch only as many thread blocks as can // be resident in the GPU and no more, rather than launching as many threads as // we have elements. Persistent CTAs loop over blocks of elements until all work // is complete. This can be faster in some cases. In our tests it is faster // for large sorts (and the threshold is higher on compute version 1.1 and earlier // GPUs than it is on compute version 2.0 GPUs. //---------------------------------------------------------------------------- template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void reorderDataKeysOnly(uint *outKeys, uint2 *keys, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[RadixSort::CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? 
keys1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); } radix = (sKeys1[threadIdx.x + RadixSort::CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + RadixSort::CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + RadixSort::CTA_SIZE]); } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. // Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } //---------------------------------------------------------------------------- // Perform one step of the radix sort. Sorts by nbits key bits per step, // starting at startbit. //---------------------------------------------------------------------------- template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStepKeysOnly(uint *keys, uint *tempKeys, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements) { const uint eltsPerBlock = RadixSort::CTA_SIZE * 4; const uint eltsPerBlock2 = RadixSort::CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; bool loop2 = numBlocks2 > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop2 ? 65535 : numBlocks2; uint blocksReorder = loop2 ? 65535 : numBlocks2; uint threshold = fullBlocks ? 
persistentCTAThresholdFullBlocks[1] : persistentCTAThreshold[1]; if (numElements >= threshold) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); loop2 = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = loop ? numCTAs[SORT_KERNEL_RADIX_SORT_BLOCKS] : numBlocks; blocksFind = loop ? numCTAs[SORT_KERNEL_FIND_RADIX_OFFSETS] : numBlocks2; blocksReorder = loop ? numCTAs[SORT_KERNEL_REORDER_DATA] : numBlocks2; } if (fullBlocks) { if (loop) radixSortBlocksKeysOnly<nbits, startbit, true, flip, true> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); else radixSortBlocksKeysOnly<nbits, startbit, true, flip, false> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); } else { if (loop) radixSortBlocksKeysOnly<nbits, startbit, false, flip, true> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); else radixSortBlocksKeysOnly<nbits, startbit, false, flip, false> <<<blocks, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)tempKeys, (uint4*)keys, numElements, numBlocks); } if (fullBlocks) { if (loop2) findRadixOffsets<startbit, true, true> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else findRadixOffsets<startbit, true, false> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } else { if (loop2) findRadixOffsets<startbit, false, true> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); else findRadixOffsets<startbit, false, false> <<<blocksFind, RadixSort::CTA_SIZE, 3 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2); } cudppScan(scanPlan, countersSum, counters, 16*numBlocks2); if (fullBlocks) { if (bManualCoalesce) { if (loop2) reorderDataKeysOnly<startbit, true, true, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderDataKeysOnly<startbit, true, true, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) reorderDataKeysOnly<startbit, true, false, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderDataKeysOnly<startbit, true, false, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } } else { if (bManualCoalesce) { if (loop2) reorderDataKeysOnly<startbit, false, true, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderDataKeysOnly<startbit, false, true, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } else { if (loop2) reorderDataKeysOnly<startbit, false, false, unflip, true> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, 
(uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); else reorderDataKeysOnly<startbit, false, false, unflip, false> <<<blocksReorder, RadixSort::CTA_SIZE>>> (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2); } } checkCudaError("radixSortStepKeysOnly"); } //---------------------------------------------------------------------------- // Optimization for sorts of fewer than 4 * CTA_SIZE elements //---------------------------------------------------------------------------- template <bool flip> void radixSortSingleBlockKeysOnly(uint *keys, uint numElements) { bool fullBlocks = (numElements % (RadixSort::CTA_SIZE * 4) == 0); if (fullBlocks) { radixSortBlocksKeysOnly<32, 0, true, flip, false> <<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)keys, numElements, 1 ); } else { radixSortBlocksKeysOnly<32, 0, false, flip, false> <<<1, RadixSort::CTA_SIZE, 4 * RadixSort::CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)keys, numElements, 1 ); } if (flip) unflipFloats<<<1, RadixSort::CTA_SIZE>>>(keys, numElements); checkCudaError("radixSortSingleBlock"); } //---------------------------------------------------------------------------- // Optimization for sorts of WARP_SIZE or fewer elements //---------------------------------------------------------------------------- template <bool flip> __global__ void radixSortSingleWarpKeysOnly(uint *keys, uint numElements) { __shared__ uint sKeys[RadixSort::WARP_SIZE]; __shared__ uint sFlags[RadixSort::WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); __SYNC // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; sFlags[threadIdx.x] = 0; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { uint temp = sKeys[threadIdx.x]; sFlags[threadIdx.x] = 1; sKeys[threadIdx.x + 1] = temp; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; } __SYNC // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); } //---------------------------------------------------------------------------- // Main key-only radix sort function. Sorts in place in the keys and values // arrays, but uses the other device arrays as temporary storage. All pointer // parameters are device pointers. Uses cudppScan() for the prefix sum of // radix counters. 
//---------------------------------------------------------------------------- extern "C" void radixSortKeysOnly(uint *keys, uint *tempKeys, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool flipBits = false) { if(numElements <= RadixSort::WARP_SIZE) { if (flipBits) radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements); else radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements); checkCudaError("radixSortSingleWarp"); return; } if(numElements <= RadixSort::CTA_SIZE * 4) { if (flipBits) radixSortSingleBlockKeysOnly<true>(keys, numElements); else radixSortSingleBlockKeysOnly<false>(keys, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 4) { radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 8) { radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 12) { radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 16) { radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 20) { radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 24) { radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } else { radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements); } } checkCudaError("radixSortKeysOnly"); } //---------------------------------------------------------------------------- // Main float key-only radix sort function. Sorts in place in the keys and values // arrays, but uses the other device arrays as temporary storage. All pointer // parameters are device pointers. Uses cudppScan() for the prefix sum of // radix counters. //---------------------------------------------------------------------------- extern "C" void radixSortFloatKeysOnly(float *keys, float *tempKeys, uint *counters, uint *countersSum, uint *blockOffsets, CUDPPHandle scanPlan, uint numElements, uint keyBits, bool negativeKeys) { radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets, scanPlan, numElements, keyBits, negativeKeys); checkCudaError("radixSortFloatKeys"); }
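// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original SDK file): the floatFlip /
// floatUnflip bit trick described in the comments above, checked in isolation
// on the host. After flipping, plain unsigned integer ordering matches
// floating-point ordering, including negative values. The host helper names
// below are hypothetical.
// --------------------------------------------------------------------------
#include <cstdio>
#include <cstring>
#include <algorithm>

static unsigned int floatFlipHost(unsigned int f)
{
    unsigned int mask = -int(f >> 31) | 0x80000000u;  // all ones if negative, else just the sign bit
    return f ^ mask;
}

static unsigned int floatUnflipHost(unsigned int f)
{
    unsigned int mask = ((f >> 31) - 1) | 0x80000000u; // inverse of the mask above
    return f ^ mask;
}

int main()
{
    float vals[6] = { -3.5f, -0.0f, 0.0f, 1.25f, -100.0f, 42.0f };
    unsigned int bits[6];
    for (int i = 0; i < 6; ++i)
    {
        std::memcpy(&bits[i], &vals[i], 4);   // reinterpret the float bits
        bits[i] = floatFlipHost(bits[i]);
    }
    std::sort(bits, bits + 6);                // ordinary unsigned sort
    for (int i = 0; i < 6; ++i)
    {
        unsigned int u = floatUnflipHost(bits[i]);
        float f;
        std::memcpy(&f, &u, 4);
        std::printf("%g ", f);                // prints -100 -3.5 -0 0 1.25 42
    }
    std::printf("\n");
    return 0;
}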
c2f325be8fe3f533bd6e25f62292d0d63a65b412.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Reads a cell at (x+dx, y+dy)
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy,
    unsigned int domain_x, unsigned int domain_y)
{
    x = (unsigned int)(x + dx) % domain_x;  // Wrap around
    y = (unsigned int)(y + dy) % domain_y;
    return source_domain[y * domain_x + x];
}

// Compute kernel
__global__ void life_kernel(int * source_domain, int * dest_domain,
    int domain_x, int domain_y)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y;

    // Neighbours are read directly from global memory: a per-block shared tile
    // would not contain the wrapped neighbours, which can live in other
    // rows/blocks of the domain.

    // Read own cell
    int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y);

    int blue = 0;   // Number of blue neighbours
    int red = 0;    // Number of red neighbours
    int total = 0;  // Total number of live neighbours
    int current;    // Value of the neighbour being examined
    int dx;         // Offset in x
    int dy;         // Offset in y

    // TODO: Read the 8 neighbors and count number of blue and red
    // TODO: DO NOT DELETE ANYTHING - ERWAN
    for(int x = 0; x < 3; x++)
    {
        dx = x - 1;                 // neighbour offset in x: -1, 0, +1
        for(int y = 0; y < 3; y++)
        {
            dy = y - 1;             // neighbour offset in y: -1, 0, +1
            if(dx != 0 || dy != 0)  // skip the cell itself
            {
                current = read_cell(source_domain, tx, ty, dx, dy, domain_x, domain_y);
                if(current == 2)
                {
                    blue++;
                }
                else if(current == 1)
                {
                    red++;
                }
            }
        }
    }
    total = blue + red;

    // TODO: Compute new value
    // TODO: STILL DO NOT DELETE ANYTHING - ERWAN
    if(total < 2 || total > 3 || (myself == 0 && total != 3))
    {   // The cell dies (or the empty cell stays empty)
        myself = 0;
    }
    else
    {   // The cell survives; an empty cell with exactly 3 neighbours is born
        // with the majority colour
        if(myself == 0)
        {
            myself = (red > blue) ? 1 : 2;
        }
    }

    // TODO: Write it in dest_domain
    // TODO: STILL DO NOT DELETE ANYTHING - ERWAN
    dest_domain[ty * domain_x + tx] = myself;
}
c2f325be8fe3f533bd6e25f62292d0d63a65b412.cu
// Reads a cell at (x+dx, y+dy)
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy,
                         unsigned int domain_x, unsigned int domain_y)
{
    x = (unsigned int)(x + dx) % domain_x;  // Wrap around
    y = (unsigned int)(y + dy) % domain_y;
    return source_domain[y * domain_x + x];
}

// Compute kernel: one thread per cell, one block row per domain row
__global__ void life_kernel(int * source_domain, int * dest_domain,
    int domain_x, int domain_y)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y;

    // Read my own cell
    int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y);

    int blue = 0;   // Number of blue tokens among the neighbors
    int red = 0;    // Number of red tokens among the neighbors
    int total = 0;  // Total number of neighbors
    int current;    // Value of the neighbor being examined

    // Read the 8 neighbors and count the number of blue and red cells
    // TODO: DON'T DELETE ANYTHING - ERWAN
    for (int dx = -1; dx <= 1; dx++)        // Offset in x
    {
        for (int dy = -1; dy <= 1; dy++)    // Offset in y
        {
            if (dx != 0 || dy != 0)         // Skip the cell itself
            {
                current = read_cell(source_domain, tx, ty, dx, dy,
                                    domain_x, domain_y);
                if (current == 2) {
                    blue++;
                } else if (current == 1) {
                    red++;
                }
            }
        }
    }

    total = blue + red;

    // Compute the new value
    // TODO: STILL DON'T DELETE ANYTHING - ERWAN
    if (total < 2 || total > 3 || (myself == 0 && total != 3))
    {
        // I die !!!!!!!
        myself = 0;
    }
    else
    {
        // I live
        if (myself == 0)
        {
            myself = (red > blue) ? 1 : 2;
        }
    }

    // Write the result into dest_domain
    // TODO: STILL DON'T DELETE ANYTHING - ERWAN
    dest_domain[ty * domain_x + tx] = myself;
}
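A minimal host-side driver for the kernel above, offered as a hedged usage sketch rather than part of the original exercise: the 128x128 domain, the 64-thread block width, the seeded cells and the iteration count are made-up values, but the grid shape follows directly from the kernel's indexing (blockIdx.y picks the row, blockIdx.x * blockDim.x + threadIdx.x picks the column), and the two domain buffers are ping-ponged between steps. The sketch assumes it sits in the same translation unit as the kernel.

#include <cuda_runtime.h>
#include <utility>
#include <vector>

// Defined above in this file (same translation unit assumed).
__global__ void life_kernel(int *source_domain, int *dest_domain,
                            int domain_x, int domain_y);

int main()
{
    const int domain_x = 128, domain_y = 128;  // hypothetical size; a power of two keeps read_cell's unsigned wrap exact
    const int BLOCK_X  = 64;                   // hypothetical block width; must divide domain_x

    // 0 = empty, 1 = red, 2 = blue; seed a few arbitrary cells.
    std::vector<int> h_domain(domain_x * domain_y, 0);
    h_domain[1 * domain_x + 1] = 1;
    h_domain[1 * domain_x + 2] = 2;
    h_domain[2 * domain_x + 1] = 1;

    int *d_src = nullptr, *d_dst = nullptr;
    const size_t bytes = (size_t)domain_x * domain_y * sizeof(int);
    cudaMalloc(&d_src, bytes);
    cudaMalloc(&d_dst, bytes);
    cudaMemcpy(d_src, h_domain.data(), bytes, cudaMemcpyHostToDevice);

    // One block row per domain row, BLOCK_X cells per block along x.
    const dim3 block(BLOCK_X, 1);
    const dim3 grid(domain_x / BLOCK_X, domain_y);

    for (int step = 0; step < 10; ++step) {
        life_kernel<<<grid, block>>>(d_src, d_dst, domain_x, domain_y);
        std::swap(d_src, d_dst);  // the destination becomes the next source
    }
    cudaDeviceSynchronize();

    cudaMemcpy(h_domain.data(), d_src, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}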
192659b846255d584f38229662a5862e4f044625.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <iostream> #include <vector> #include <cuml/manifold/umapparams.h> #include <datasets/digits.h> #include <raft/cudart_utils.h> #include <cuml/common/cuml_allocator.hpp> #include <cuml/common/device_buffer.hpp> #include <cuml/cuml.hpp> #include <cuml/datasets/make_blobs.hpp> #include <cuml/manifold/umap.hpp> #include <cuml/neighbors/knn.hpp> #include <distance/distance.cuh> #include <linalg/reduce_rows_by_key.cuh> #include <metrics/trustworthiness.cuh> #include <raft/cuda_utils.cuh> #include <selection/knn.cuh> #include <umap/runner.cuh> using namespace ML; using namespace ML::Metrics; using namespace std; using namespace MLCommon; using namespace MLCommon::Distance; using namespace MLCommon::Datasets::Digits; template <typename T> __global__ void has_nan_kernel(T* data, size_t len, bool* answer) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; bool val = data[tid]; if (val != val) { *answer = true; } } template <typename T> bool has_nan(T* data, size_t len, std::shared_ptr<deviceAllocator> alloc, hipStream_t stream) { dim3 blk(256); dim3 grid(raft::ceildiv(len, (size_t)blk.x)); bool h_answer = false; device_buffer<bool> d_answer(alloc, stream, 1); raft::update_device(d_answer.data(), &h_answer, 1, stream); hipLaunchKernelGGL(( has_nan_kernel), dim3(grid), dim3(blk), 0, stream, data, len, d_answer.data()); raft::update_host(&h_answer, d_answer.data(), 1, stream); CUDA_CHECK(hipStreamSynchronize(stream)); return h_answer; } template <typename T> __global__ void are_equal_kernel(T* embedding1, T* embedding2, size_t len, double* diff) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (embedding1[tid] != embedding2[tid]) { atomicAdd(diff, abs(embedding1[tid] - embedding2[tid])); } } template <typename T> bool are_equal(T* embedding1, T* embedding2, size_t len, std::shared_ptr<deviceAllocator> alloc, hipStream_t stream) { double h_answer = 0.; device_buffer<double> d_answer(alloc, stream, 1); raft::update_device(d_answer.data(), &h_answer, 1, stream); hipLaunchKernelGGL(( are_equal_kernel), dim3(raft::ceildiv(len, (size_t)32)), dim3(32), 0, stream, embedding1, embedding2, len, d_answer.data()); raft::update_host(&h_answer, d_answer.data(), 1, stream); CUDA_CHECK(hipStreamSynchronize(stream)); double tolerance = 1.0; if (h_answer > tolerance) { std::cout << "Not equal, difference : " << h_answer << std::endl; return false; } return true; } class UMAPParametrizableTest : public ::testing::Test { protected: struct TestParams { bool fit_transform; bool supervised; bool knn_params; int n_samples; int n_features; int n_clusters; double min_trustworthiness; }; void get_embedding(raft::handle_t& handle, float* X, float* y, float* embedding_ptr, TestParams& test_params, UMAPParams& umap_params) { hipStream_t stream = handle.get_stream(); auto alloc 
= handle.get_device_allocator(); int& n_samples = test_params.n_samples; int& n_features = test_params.n_features; device_buffer<int64_t>* knn_indices_b; device_buffer<float>* knn_dists_b; int64_t* knn_indices = nullptr; float* knn_dists = nullptr; if (test_params.knn_params) { knn_indices_b = new device_buffer<int64_t>( alloc, stream, n_samples * umap_params.n_neighbors); knn_dists_b = new device_buffer<float>( alloc, stream, n_samples * umap_params.n_neighbors); knn_indices = knn_indices_b->data(); knn_dists = knn_dists_b->data(); std::vector<float*> ptrs(1); std::vector<int> sizes(1); ptrs[0] = X; sizes[0] = n_samples; raft::spatial::knn::brute_force_knn(handle, ptrs, sizes, n_features, X, n_samples, knn_indices, knn_dists, umap_params.n_neighbors); CUDA_CHECK(hipStreamSynchronize(stream)); } float* model_embedding = nullptr; device_buffer<float>* model_embedding_b; if (test_params.fit_transform) { model_embedding = embedding_ptr; } else { model_embedding_b = new device_buffer<float>( alloc, stream, n_samples * umap_params.n_components); model_embedding = model_embedding_b->data(); } CUDA_CHECK(hipMemsetAsync( model_embedding, 0, n_samples * umap_params.n_components * sizeof(float), stream)); CUDA_CHECK(hipStreamSynchronize(stream)); if (test_params.supervised) { ML::UMAP::fit(handle, X, y, n_samples, n_features, knn_indices, knn_dists, &umap_params, model_embedding); } else { ML::UMAP::fit(handle, X, nullptr, n_samples, n_features, knn_indices, knn_dists, &umap_params, model_embedding); } CUDA_CHECK(hipStreamSynchronize(stream)); if (!test_params.fit_transform) { CUDA_CHECK(hipMemsetAsync( embedding_ptr, 0, n_samples * umap_params.n_components * sizeof(float), stream)); CUDA_CHECK(hipStreamSynchronize(stream)); ML::UMAP::transform(handle, X, n_samples, umap_params.n_components, knn_indices, knn_dists, X, n_samples, model_embedding, n_samples, &umap_params, embedding_ptr); CUDA_CHECK(hipStreamSynchronize(stream)); delete model_embedding_b; } if (test_params.knn_params) { delete knn_indices_b; delete knn_dists_b; } } void assertions(raft::handle_t& handle, float* X, float* embedding_ptr, TestParams& test_params, UMAPParams& umap_params) { hipStream_t stream = handle.get_stream(); auto alloc = handle.get_device_allocator(); int& n_samples = test_params.n_samples; int& n_features = test_params.n_features; ASSERT_TRUE(!has_nan(embedding_ptr, n_samples * umap_params.n_components, alloc, stream)); double trustworthiness = trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>( handle, X, embedding_ptr, n_samples, n_features, umap_params.n_components, umap_params.n_neighbors); std::cout << "min. 
expected trustworthiness: " << test_params.min_trustworthiness << std::endl; std::cout << "trustworthiness: " << trustworthiness << std::endl; ASSERT_TRUE(trustworthiness > test_params.min_trustworthiness); } void test(TestParams& test_params, UMAPParams& umap_params) { std::cout << "\numap_params : [" << std::boolalpha << umap_params.n_neighbors << "-" << umap_params.n_components << "-" << umap_params.n_epochs << "-" << umap_params.random_state << "-" << umap_params.multicore_implem << "]" << std::endl; std::cout << "test_params : [" << std::boolalpha << test_params.fit_transform << "-" << test_params.supervised << "-" << test_params.knn_params << "-" << test_params.n_samples << "-" << test_params.n_features << "-" << test_params.n_clusters << "-" << test_params.min_trustworthiness << "]" << std::endl; raft::handle_t handle; hipStream_t stream = handle.get_stream(); auto alloc = handle.get_device_allocator(); int& n_samples = test_params.n_samples; int& n_features = test_params.n_features; UMAP::find_ab(handle, &umap_params); device_buffer<float> X_d(alloc, stream, n_samples * n_features); device_buffer<int> y_d(alloc, stream, n_samples); ML::Datasets::make_blobs(handle, X_d.data(), y_d.data(), n_samples, n_features, test_params.n_clusters, true, nullptr, nullptr, 1.f, true, -10.f, 10.f, 1234ULL); CUDA_CHECK(hipStreamSynchronize(stream)); MLCommon::LinAlg::convert_array((float*)y_d.data(), y_d.data(), n_samples, stream); CUDA_CHECK(hipStreamSynchronize(stream)); device_buffer<float> embeddings1(alloc, stream, n_samples * umap_params.n_components); float* e1 = embeddings1.data(); get_embedding(handle, X_d.data(), (float*)y_d.data(), e1, test_params, umap_params); assertions(handle, X_d.data(), e1, test_params, umap_params); // Disable reproducibility tests after transformation if (!test_params.fit_transform) { return; } #if CUDART_VERSION >= 11020 if (!umap_params.multicore_implem) { device_buffer<float> embeddings2(alloc, stream, n_samples * umap_params.n_components); float* e2 = embeddings2.data(); get_embedding(handle, X_d.data(), (float*)y_d.data(), e2, test_params, umap_params); bool equal = are_equal(e1, e2, n_samples * umap_params.n_components, alloc, stream); if (!equal) { raft::print_device_vector("e1", e1, 25, std::cout); raft::print_device_vector("e2", e2, 25, std::cout); } ASSERT_TRUE(equal); } #endif } void SetUp() override { std::vector<TestParams> test_params_vec = { {false, false, false, 2000, 50, 20, 0.45}, {true, false, false, 2000, 50, 20, 0.45}, {false, true, false, 2000, 50, 20, 0.45}, {false, false, true, 2000, 50, 20, 0.45}, {true, true, false, 2000, 50, 20, 0.45}, {true, false, true, 2000, 50, 20, 0.45}, {false, true, true, 2000, 50, 20, 0.45}, {true, true, true, 2000, 50, 20, 0.45}}; std::vector<UMAPParams> umap_params_vec(4); umap_params_vec[0].n_components = 2; umap_params_vec[0].multicore_implem = true; umap_params_vec[1].n_components = 10; umap_params_vec[1].multicore_implem = true; umap_params_vec[2].n_components = 21; umap_params_vec[2].random_state = 43; umap_params_vec[2].init = 0; umap_params_vec[2].multicore_implem = false; umap_params_vec[2].optim_batch_size = 0; // use default value umap_params_vec[2].n_epochs = 500; umap_params_vec[3].n_components = 25; umap_params_vec[3].random_state = 43; umap_params_vec[3].init = 0; umap_params_vec[3].multicore_implem = false; umap_params_vec[3].optim_batch_size = 0; // use default value umap_params_vec[3].n_epochs = 500; for (auto& umap_params : umap_params_vec) { for (auto& test_params : test_params_vec) { 
test(test_params, umap_params); } } } void TearDown() override {} }; typedef UMAPParametrizableTest UMAPParametrizableTest; TEST_F(UMAPParametrizableTest, Result) {}
192659b846255d584f38229662a5862e4f044625.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <iostream> #include <vector> #include <cuml/manifold/umapparams.h> #include <datasets/digits.h> #include <raft/cudart_utils.h> #include <cuml/common/cuml_allocator.hpp> #include <cuml/common/device_buffer.hpp> #include <cuml/cuml.hpp> #include <cuml/datasets/make_blobs.hpp> #include <cuml/manifold/umap.hpp> #include <cuml/neighbors/knn.hpp> #include <distance/distance.cuh> #include <linalg/reduce_rows_by_key.cuh> #include <metrics/trustworthiness.cuh> #include <raft/cuda_utils.cuh> #include <selection/knn.cuh> #include <umap/runner.cuh> using namespace ML; using namespace ML::Metrics; using namespace std; using namespace MLCommon; using namespace MLCommon::Distance; using namespace MLCommon::Datasets::Digits; template <typename T> __global__ void has_nan_kernel(T* data, size_t len, bool* answer) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; bool val = data[tid]; if (val != val) { *answer = true; } } template <typename T> bool has_nan(T* data, size_t len, std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream) { dim3 blk(256); dim3 grid(raft::ceildiv(len, (size_t)blk.x)); bool h_answer = false; device_buffer<bool> d_answer(alloc, stream, 1); raft::update_device(d_answer.data(), &h_answer, 1, stream); has_nan_kernel<<<grid, blk, 0, stream>>>(data, len, d_answer.data()); raft::update_host(&h_answer, d_answer.data(), 1, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); return h_answer; } template <typename T> __global__ void are_equal_kernel(T* embedding1, T* embedding2, size_t len, double* diff) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (embedding1[tid] != embedding2[tid]) { atomicAdd(diff, abs(embedding1[tid] - embedding2[tid])); } } template <typename T> bool are_equal(T* embedding1, T* embedding2, size_t len, std::shared_ptr<deviceAllocator> alloc, cudaStream_t stream) { double h_answer = 0.; device_buffer<double> d_answer(alloc, stream, 1); raft::update_device(d_answer.data(), &h_answer, 1, stream); are_equal_kernel<<<raft::ceildiv(len, (size_t)32), 32, 0, stream>>>( embedding1, embedding2, len, d_answer.data()); raft::update_host(&h_answer, d_answer.data(), 1, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); double tolerance = 1.0; if (h_answer > tolerance) { std::cout << "Not equal, difference : " << h_answer << std::endl; return false; } return true; } class UMAPParametrizableTest : public ::testing::Test { protected: struct TestParams { bool fit_transform; bool supervised; bool knn_params; int n_samples; int n_features; int n_clusters; double min_trustworthiness; }; void get_embedding(raft::handle_t& handle, float* X, float* y, float* embedding_ptr, TestParams& test_params, UMAPParams& umap_params) { cudaStream_t stream = handle.get_stream(); auto alloc = handle.get_device_allocator(); int& n_samples = test_params.n_samples; int& n_features = test_params.n_features; device_buffer<int64_t>* 
knn_indices_b; device_buffer<float>* knn_dists_b; int64_t* knn_indices = nullptr; float* knn_dists = nullptr; if (test_params.knn_params) { knn_indices_b = new device_buffer<int64_t>( alloc, stream, n_samples * umap_params.n_neighbors); knn_dists_b = new device_buffer<float>( alloc, stream, n_samples * umap_params.n_neighbors); knn_indices = knn_indices_b->data(); knn_dists = knn_dists_b->data(); std::vector<float*> ptrs(1); std::vector<int> sizes(1); ptrs[0] = X; sizes[0] = n_samples; raft::spatial::knn::brute_force_knn(handle, ptrs, sizes, n_features, X, n_samples, knn_indices, knn_dists, umap_params.n_neighbors); CUDA_CHECK(cudaStreamSynchronize(stream)); } float* model_embedding = nullptr; device_buffer<float>* model_embedding_b; if (test_params.fit_transform) { model_embedding = embedding_ptr; } else { model_embedding_b = new device_buffer<float>( alloc, stream, n_samples * umap_params.n_components); model_embedding = model_embedding_b->data(); } CUDA_CHECK(cudaMemsetAsync( model_embedding, 0, n_samples * umap_params.n_components * sizeof(float), stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); if (test_params.supervised) { ML::UMAP::fit(handle, X, y, n_samples, n_features, knn_indices, knn_dists, &umap_params, model_embedding); } else { ML::UMAP::fit(handle, X, nullptr, n_samples, n_features, knn_indices, knn_dists, &umap_params, model_embedding); } CUDA_CHECK(cudaStreamSynchronize(stream)); if (!test_params.fit_transform) { CUDA_CHECK(cudaMemsetAsync( embedding_ptr, 0, n_samples * umap_params.n_components * sizeof(float), stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); ML::UMAP::transform(handle, X, n_samples, umap_params.n_components, knn_indices, knn_dists, X, n_samples, model_embedding, n_samples, &umap_params, embedding_ptr); CUDA_CHECK(cudaStreamSynchronize(stream)); delete model_embedding_b; } if (test_params.knn_params) { delete knn_indices_b; delete knn_dists_b; } } void assertions(raft::handle_t& handle, float* X, float* embedding_ptr, TestParams& test_params, UMAPParams& umap_params) { cudaStream_t stream = handle.get_stream(); auto alloc = handle.get_device_allocator(); int& n_samples = test_params.n_samples; int& n_features = test_params.n_features; ASSERT_TRUE(!has_nan(embedding_ptr, n_samples * umap_params.n_components, alloc, stream)); double trustworthiness = trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>( handle, X, embedding_ptr, n_samples, n_features, umap_params.n_components, umap_params.n_neighbors); std::cout << "min. 
expected trustworthiness: " << test_params.min_trustworthiness << std::endl; std::cout << "trustworthiness: " << trustworthiness << std::endl; ASSERT_TRUE(trustworthiness > test_params.min_trustworthiness); } void test(TestParams& test_params, UMAPParams& umap_params) { std::cout << "\numap_params : [" << std::boolalpha << umap_params.n_neighbors << "-" << umap_params.n_components << "-" << umap_params.n_epochs << "-" << umap_params.random_state << "-" << umap_params.multicore_implem << "]" << std::endl; std::cout << "test_params : [" << std::boolalpha << test_params.fit_transform << "-" << test_params.supervised << "-" << test_params.knn_params << "-" << test_params.n_samples << "-" << test_params.n_features << "-" << test_params.n_clusters << "-" << test_params.min_trustworthiness << "]" << std::endl; raft::handle_t handle; cudaStream_t stream = handle.get_stream(); auto alloc = handle.get_device_allocator(); int& n_samples = test_params.n_samples; int& n_features = test_params.n_features; UMAP::find_ab(handle, &umap_params); device_buffer<float> X_d(alloc, stream, n_samples * n_features); device_buffer<int> y_d(alloc, stream, n_samples); ML::Datasets::make_blobs(handle, X_d.data(), y_d.data(), n_samples, n_features, test_params.n_clusters, true, nullptr, nullptr, 1.f, true, -10.f, 10.f, 1234ULL); CUDA_CHECK(cudaStreamSynchronize(stream)); MLCommon::LinAlg::convert_array((float*)y_d.data(), y_d.data(), n_samples, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); device_buffer<float> embeddings1(alloc, stream, n_samples * umap_params.n_components); float* e1 = embeddings1.data(); get_embedding(handle, X_d.data(), (float*)y_d.data(), e1, test_params, umap_params); assertions(handle, X_d.data(), e1, test_params, umap_params); // Disable reproducibility tests after transformation if (!test_params.fit_transform) { return; } #if CUDART_VERSION >= 11020 if (!umap_params.multicore_implem) { device_buffer<float> embeddings2(alloc, stream, n_samples * umap_params.n_components); float* e2 = embeddings2.data(); get_embedding(handle, X_d.data(), (float*)y_d.data(), e2, test_params, umap_params); bool equal = are_equal(e1, e2, n_samples * umap_params.n_components, alloc, stream); if (!equal) { raft::print_device_vector("e1", e1, 25, std::cout); raft::print_device_vector("e2", e2, 25, std::cout); } ASSERT_TRUE(equal); } #endif } void SetUp() override { std::vector<TestParams> test_params_vec = { {false, false, false, 2000, 50, 20, 0.45}, {true, false, false, 2000, 50, 20, 0.45}, {false, true, false, 2000, 50, 20, 0.45}, {false, false, true, 2000, 50, 20, 0.45}, {true, true, false, 2000, 50, 20, 0.45}, {true, false, true, 2000, 50, 20, 0.45}, {false, true, true, 2000, 50, 20, 0.45}, {true, true, true, 2000, 50, 20, 0.45}}; std::vector<UMAPParams> umap_params_vec(4); umap_params_vec[0].n_components = 2; umap_params_vec[0].multicore_implem = true; umap_params_vec[1].n_components = 10; umap_params_vec[1].multicore_implem = true; umap_params_vec[2].n_components = 21; umap_params_vec[2].random_state = 43; umap_params_vec[2].init = 0; umap_params_vec[2].multicore_implem = false; umap_params_vec[2].optim_batch_size = 0; // use default value umap_params_vec[2].n_epochs = 500; umap_params_vec[3].n_components = 25; umap_params_vec[3].random_state = 43; umap_params_vec[3].init = 0; umap_params_vec[3].multicore_implem = false; umap_params_vec[3].optim_batch_size = 0; // use default value umap_params_vec[3].n_epochs = 500; for (auto& umap_params : umap_params_vec) { for (auto& test_params : test_params_vec) { 
test(test_params, umap_params); } } } void TearDown() override {} }; typedef UMAPParametrizableTest UMAPParametrizableTest; TEST_F(UMAPParametrizableTest, Result) {}
acdac74933f8e29b40df88f05c599f93fe79214c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <functional> #pragma once #ifdef PADDLE_WITH_HETERPS #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" namespace paddle { namespace framework { /* comment 0 this kernel just serves as an example of how to sample nodes' neighbors. feel free to modify it index[0,len) saves the nodes' index actual_size[0,len) is to save the sample size of each node. for ith node in index, actual_size[i] = min(node i's neighbor size, sample size) sample_result is to save the neighbor sampling result, its size is len * sample_size; */ __global__ void get_cpu_id_index(int64_t* key, int* actual_sample_size, int64_t* cpu_key, int* sum, int* index, int len) { CUDA_KERNEL_LOOP(i, len) { if (actual_sample_size[i] == -1) { int old = atomicAdd(sum, 1); cpu_key[old] = key[i]; index[old] = i; // printf("old %d i-%d key:%lld\n",old,i,key[i]); } } } __global__ void get_actual_gpu_ac(int* gpu_ac, int number_on_cpu) { CUDA_KERNEL_LOOP(i, number_on_cpu) { gpu_ac[i] /= sizeof(int64_t); } } template <int WARP_SIZE, int BLOCK_WARPS, int TILE_SIZE> __global__ void copy_buffer_ac_to_final_place( int64_t* gpu_buffer, int* gpu_ac, int64_t* val, int* actual_sample_size, int* index, int* cumsum_gpu_ac, int number_on_cpu, int sample_size) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BLOCK_WARPS); int i = blockIdx.x * TILE_SIZE + threadIdx.y; const int last_idx = min(static_cast<int>(blockIdx.x + 1) * TILE_SIZE, number_on_cpu); while (i < last_idx) { actual_sample_size[index[i]] = gpu_ac[i]; for (int j = threadIdx.x; j < gpu_ac[i]; j += WARP_SIZE) { val[index[i] * sample_size + j] = gpu_buffer[cumsum_gpu_ac[i] + j]; } i += BLOCK_WARPS; } } template <int WARP_SIZE, int BLOCK_WARPS, int TILE_SIZE> __global__ void neighbor_sample_example_v2(GpuPsCommGraph graph, int64_t* node_index, int* actual_size, int64_t* res, int sample_len, int n, int default_value) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BLOCK_WARPS); int i = blockIdx.x * TILE_SIZE + threadIdx.y; const int last_idx = min(static_cast<int>(blockIdx.x + 1) * TILE_SIZE, n); hiprandState_t rng; hiprand_init(blockIdx.x, threadIdx.y * WARP_SIZE + threadIdx.x, 0, &rng); while (i < last_idx) { if (node_index[i] == -1) { actual_size[i] = default_value; i += BLOCK_WARPS; continue; } int neighbor_len = (int)graph.node_list[node_index[i]].neighbor_size; int64_t data_offset = graph.node_list[node_index[i]].neighbor_offset; int offset = i * sample_len; int64_t* data = graph.neighbor_list; if (neighbor_len <= sample_len) { for (int j = threadIdx.x; j < neighbor_len; j += WARP_SIZE) { res[offset + j] = data[data_offset + j]; } actual_size[i] = neighbor_len; } else { for (int j = threadIdx.x; j < sample_len; j += WARP_SIZE) { res[offset + j] = j; } __syncwarp(); for 
(int j = sample_len + threadIdx.x; j < neighbor_len; j += WARP_SIZE) { const int num = hiprand(&rng) % (j + 1); if (num < sample_len) { atomicMax(reinterpret_cast<unsigned int*>(res + offset + num), static_cast<unsigned int>(j)); } } __syncwarp(); for (int j = threadIdx.x; j < sample_len; j += WARP_SIZE) { const int64_t perm_idx = res[offset + j] + data_offset; res[offset + j] = data[perm_idx]; } actual_size[i] = sample_len; } i += BLOCK_WARPS; } } __global__ void neighbor_sample_example(GpuPsCommGraph graph, int64_t* node_index, int* actual_size, int64_t* res, int sample_len, int* sample_status, int n, int from) { int id = blockIdx.x * blockDim.y + threadIdx.y; if (id < n) { if (node_index[id] == -1) { actual_size[id] = 0; return; } hiprandState_t rng; hiprand_init(blockIdx.x, threadIdx.x, threadIdx.y, &rng); int64_t index = threadIdx.x; int64_t offset = id * sample_len; int64_t* data = graph.neighbor_list; int64_t data_offset = graph.node_list[node_index[id]].neighbor_offset; int64_t neighbor_len = graph.node_list[node_index[id]].neighbor_size; int ac_len; if (sample_len > neighbor_len) ac_len = neighbor_len; else { ac_len = sample_len; } if (4 * ac_len >= 3 * neighbor_len) { if (index == 0) { res[offset] = hiprand(&rng) % (neighbor_len - ac_len + 1); } __syncwarp(); int start = res[offset]; while (index < ac_len) { res[offset + index] = data[data_offset + start + index]; index += blockDim.x; } actual_size[id] = ac_len; } else { while (index < ac_len) { int num = hiprand(&rng) % neighbor_len; int* addr = sample_status + data_offset + num; int expected = *addr; if (!(expected & (1 << from))) { int old = atomicCAS(addr, expected, expected | (1 << from)); if (old == expected) { res[offset + index] = num; index += blockDim.x; } } } __syncwarp(); index = threadIdx.x; while (index < ac_len) { int* addr = sample_status + data_offset + res[offset + index]; int expected, old = *addr; do { expected = old; old = atomicCAS(addr, expected, expected & (~(1 << from))); } while (old != expected); res[offset + index] = data[data_offset + res[offset + index]]; index += blockDim.x; } actual_size[id] = ac_len; } } // const size_t i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // auto node_index = index[i]; // actual_size[i] = graph.node_list[node_index].neighbor_size < sample_size // ? 
graph.node_list[node_index].neighbor_size // : sample_size; // int offset = graph.node_list[node_index].neighbor_offset; // for (int j = 0; j < actual_size[i]; j++) { // sample_result[sample_size * i + j] = graph.neighbor_list[offset + j]; // } // } } int GpuPsGraphTable::init_cpu_table( const paddle::distributed::GraphParameter& graph) { cpu_graph_table.reset(new paddle::distributed::GraphTable); cpu_table_status = cpu_graph_table->Initialize(graph); // if (cpu_table_status != 0) return cpu_table_status; // std::function<void(std::vector<GpuPsCommGraph>&)> callback = // [this](std::vector<GpuPsCommGraph>& res) { // pthread_rwlock_wrlock(this->rw_lock.get()); // this->clear_graph_info(); // this->build_graph_from_cpu(res); // pthread_rwlock_unlock(this->rw_lock.get()); // cv_.notify_one(); // }; // cpu_graph_table->set_graph_sample_callback(callback); return cpu_table_status; } // int GpuPsGraphTable::load(const std::string& path, const std::string& param) // { // int status = cpu_graph_table->load(path, param); // if (status != 0) { // return status; // } // std::unique_lock<std::mutex> lock(mutex_); // cpu_graph_table->start_graph_sampling(); // cv_.wait(lock); // return 0; // } /* comment 1 gpu i triggers a neighbor_sample task, when this task is done, this function is called to move the sample result on other gpu back to gup i and aggragate the result. the sample_result is saved on src_sample_res and the actual sample size for each node is saved on actual_sample_size. the number of actual sample_result for key[x] (refer to comment 2 for definition of key) is saved on actual_sample_size[x], since the neighbor size of key[x] might be smaller than sample_size, is saved on src_sample_res [x*sample_size, x*sample_size + actual_sample_size[x]) since before each gpu runs the neighbor_sample task,the key array is shuffled, but we have the idx array to save the original order. when the gpu i gets all the sample results from other gpus, it relies on idx array to recover the original order. that's what fill_dvals does. 
*/ void GpuPsGraphTable::display_sample_res(void* key, void* val, int len, int sample_len) { char key_buffer[len * sizeof(int64_t)]; char val_buffer[sample_len * sizeof(int64_t) * len + (len + len % 2) * sizeof(int) + len * sizeof(int64_t)]; hipMemcpy(key_buffer, key, sizeof(int64_t) * len, hipMemcpyDeviceToHost); hipMemcpy(val_buffer, val, sample_len * sizeof(int64_t) * len + (len + len % 2) * sizeof(int) + len * sizeof(int64_t), hipMemcpyDeviceToHost); int64_t* sample_val = (int64_t*)(val_buffer + (len + len % 2) * sizeof(int) + len * sizeof(int64_t)); for (int i = 0; i < len; i++) { printf("key %lld\n", *(int64_t*)(key_buffer + i * sizeof(int64_t))); printf("index %lld\n", *(int64_t*)(val_buffer + i * sizeof(int64_t))); int ac_size = *(int*)(val_buffer + i * sizeof(int) + len * sizeof(int64_t)); printf("sampled %d neigbhors\n", ac_size); for (int j = 0; j < ac_size; j++) { printf("%lld ", sample_val[i * sample_len + j]); } printf("\n"); } } void GpuPsGraphTable::move_neighbor_sample_result_to_source_gpu( int start_index, int gpu_num, int sample_size, int* h_left, int* h_right, int64_t* src_sample_res, int* actual_sample_size) { int shard_len[gpu_num]; for (int i = 0; i < gpu_num; i++) { if (h_left[i] == -1 || h_right[i] == -1) { continue; } shard_len[i] = h_right[i] - h_left[i] + 1; int cur_step = (int)path_[start_index][i].nodes_.size() - 1; for (int j = cur_step; j > 0; j--) { hipMemcpyAsync(path_[start_index][i].nodes_[j - 1].val_storage, path_[start_index][i].nodes_[j].val_storage, path_[start_index][i].nodes_[j - 1].val_bytes_len, hipMemcpyDefault, path_[start_index][i].nodes_[j - 1].out_stream); } auto& node = path_[start_index][i].nodes_.front(); hipMemcpyAsync( reinterpret_cast<char*>(src_sample_res + h_left[i] * sample_size), node.val_storage + sizeof(int64_t) * shard_len[i] + sizeof(int) * (shard_len[i] + shard_len[i] % 2), sizeof(int64_t) * shard_len[i] * sample_size, hipMemcpyDefault, node.out_stream); hipMemcpyAsync(reinterpret_cast<char*>(actual_sample_size + h_left[i]), node.val_storage + sizeof(int64_t) * shard_len[i], sizeof(int) * shard_len[i], hipMemcpyDefault, node.out_stream); } for (int i = 0; i < gpu_num; ++i) { if (h_left[i] == -1 || h_right[i] == -1) { continue; } auto& node = path_[start_index][i].nodes_.front(); hipStreamSynchronize(node.out_stream); // hipStreamSynchronize(resource_->remote_stream(i, start_index)); } /* std::queue<CopyTask> que; // auto& node = path_[gpu_id][i].nodes_.front(); // hipMemcpyAsync( // reinterpret_cast<char*>(src_sample_res + h_left[i] * sample_size), // node.val_storage + sizeof(int64_t) * shard_len, // node.val_bytes_len - sizeof(int64_t) * shard_len, hipMemcpyDefault, // node.out_stream); // hipMemcpyAsync(reinterpret_cast<char*>(actual_sample_size + h_left[i]), // node.val_storage + sizeof(int) * shard_len, // sizeof(int) * shard_len, hipMemcpyDefault, // node.out_stream); int cur_step = path_[start_index][i].nodes_.size() - 1; auto& node = path_[start_index][i].nodes_[cur_step]; if (cur_step == 0) { // hipMemcpyAsync(reinterpret_cast<char*>(src_val + h_left[i]), // node.val_storage, node.val_bytes_len, // hipMemcpyDefault, // node.out_stream); // VLOG(0)<<"copy "<<node.gpu_num<<" to "<<start_index; hipMemcpyAsync( reinterpret_cast<char*>(src_sample_res + h_left[i] * sample_size), node.val_storage + sizeof(int64_t) * shard_len[i], node.val_bytes_len - sizeof(int64_t) * shard_len[i], hipMemcpyDefault, node.out_stream); //resource_->remote_stream(i, start_index)); hipMemcpyAsync(reinterpret_cast<char*>(actual_sample_size + 
h_left[i]), node.val_storage + sizeof(int) * shard_len[i], sizeof(int) * shard_len[i], hipMemcpyDefault, node.out_stream); //resource_->remote_stream(i, start_index)); } else { CopyTask t(&path_[start_index][i], cur_step - 1); que.push(t); // VLOG(0)<<"copy "<<node.gpu_num<<" to "<<path_[start_index][i].nodes_[cur_step - 1].gpu_num; hipMemcpyAsync(path_[start_index][i].nodes_[cur_step - 1].val_storage, node.val_storage, path_[start_index][i].nodes_[cur_step - 1].val_bytes_len, hipMemcpyDefault, path_[start_index][i].nodes_[cur_step - 1].out_stream); //resource_->remote_stream(i, start_index)); } } while (!que.empty()) { CopyTask& cur_task = que.front(); que.pop(); int cur_step = cur_task.step; if (cur_task.path->nodes_[cur_step].sync) { hipStreamSynchronize(cur_task.path->nodes_[cur_step].out_stream); //hipStreamSynchronize(resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); } if (cur_step > 0) { CopyTask c(cur_task.path, cur_step - 1); que.push(c); hipMemcpyAsync(cur_task.path->nodes_[cur_step - 1].val_storage, cur_task.path->nodes_[cur_step].val_storage, cur_task.path->nodes_[cur_step - 1].val_bytes_len, hipMemcpyDefault, cur_task.path->nodes_[cur_step - 1].out_stream); //resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); } else if (cur_step == 0) { int end_index = cur_task.path->nodes_.back().gpu_num; // hipMemcpyAsync(reinterpret_cast<char*>(src_val + h_left[end_index]), // cur_task.path->nodes_[cur_step].val_storage, // cur_task.path->nodes_[cur_step].val_bytes_len, // hipMemcpyDefault, // cur_task.path->nodes_[cur_step].out_stream); //VLOG(0)<<"copy "<<cur_task.path->nodes_[cur_step].gpu_num<< " to "<<start_index; hipMemcpyAsync(reinterpret_cast<char*>(src_sample_res + h_left[end_index] * sample_size), cur_task.path->nodes_[cur_step].val_storage + sizeof(int64_t) * shard_len[end_index], cur_task.path->nodes_[cur_step].val_bytes_len - sizeof(int64_t) * shard_len[end_index], hipMemcpyDefault, cur_task.path->nodes_[cur_step].out_stream); //resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); hipMemcpyAsync( reinterpret_cast<char*>(actual_sample_size + h_left[end_index]), cur_task.path->nodes_[cur_step].val_storage + sizeof(int) * shard_len[end_index], sizeof(int) * shard_len[end_index], hipMemcpyDefault, cur_task.path->nodes_[cur_step].out_stream); //resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); } } for (int i = 0; i < gpu_num; ++i) { if (h_left[i] == -1 || h_right[i] == -1) { continue; } auto& node = path_[start_index][i].nodes_.front(); hipStreamSynchronize(node.out_stream); //hipStreamSynchronize(resource_->remote_stream(i, start_index)); } */ } /* TODO: how to optimize it to eliminate the for loop */ __global__ void fill_dvalues(int64_t* d_shard_vals, int64_t* d_vals, int* d_shard_actual_sample_size, int* d_actual_sample_size, int* idx, int sample_size, int len) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { d_actual_sample_size[idx[i]] = d_shard_actual_sample_size[i]; for (int j = 0; j < sample_size; j++) { d_vals[idx[i] * sample_size + j] = d_shard_vals[i * sample_size + j]; } } } __global__ void fill_actual_vals(int64_t* vals, int64_t* actual_vals, int* actual_sample_size, int* cumsum_actual_sample_size, int sample_size, int len) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { for (int j = 0; j < actual_sample_size[i]; j++) { actual_vals[cumsum_actual_sample_size[i] + j] = vals[sample_size * i + j]; } } } __global__ void 
node_query_example(GpuPsCommGraph graph, int start, int size, int64_t* res) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { res[i] = graph.node_list[start + i].node_id; } } void GpuPsGraphTable::clear_graph_info(int gpu_id) { if (tables_.size() && tables_[gpu_id] != NULL) { delete tables_[gpu_id]; } auto& graph = gpu_graph_list[gpu_id]; if (graph.neighbor_list != NULL) { hipFree(graph.neighbor_list); } if (graph.node_list != NULL) { hipFree(graph.node_list); } } void GpuPsGraphTable::clear_graph_info() { if (tables_.size()) { for (auto table : tables_) delete table; } tables_.clear(); for (auto graph : gpu_graph_list) { if (graph.neighbor_list != NULL) { hipFree(graph.neighbor_list); } if (graph.node_list != NULL) { hipFree(graph.node_list); } } gpu_graph_list.clear(); } /* the parameter std::vector<GpuPsCommGraph> cpu_graph_list is generated by cpu. it saves the graph to be saved on each gpu. for the ith GpuPsCommGraph, any the node's key satisfies that key % gpu_number == i In this function, memory is allocated on each gpu to save the graphs, gpu i saves the ith graph from cpu_graph_list */ void GpuPsGraphTable::build_graph_on_single_gpu(GpuPsCommGraph& g, int i) { clear_graph_info(i); platform::CUDADeviceGuard guard(resource_->dev_id(i)); // platform::CUDADeviceGuard guard(i); gpu_graph_list[i] = GpuPsCommGraph(); sample_status[i] = NULL; tables_[i] = new Table(::max((int64_t)1, g.node_size) / load_factor_); if (g.node_size > 0) { std::vector<int64_t> keys; std::vector<int64_t> offset; hipMalloc((void**)&gpu_graph_list[i].node_list, g.node_size * sizeof(GpuPsGraphNode)); hipMemcpy(gpu_graph_list[i].node_list, g.node_list, g.node_size * sizeof(GpuPsGraphNode), hipMemcpyHostToDevice); for (int64_t j = 0; j < g.node_size; j++) { keys.push_back(g.node_list[j].node_id); offset.push_back(j); } build_ps(i, (uint64_t*)keys.data(), offset.data(), keys.size(), 1024, 8); gpu_graph_list[i].node_size = g.node_size; } else { build_ps(i, NULL, NULL, 0, 1024, 8); gpu_graph_list[i].node_list = NULL; gpu_graph_list[i].node_size = 0; } if (g.neighbor_size) { hipError_t cudaStatus = hipMalloc((void**)&gpu_graph_list[i].neighbor_list, g.neighbor_size * sizeof(int64_t)); PADDLE_ENFORCE_EQ(cudaStatus, hipSuccess, platform::errors::InvalidArgument( "ailed to allocate memory for graph on gpu ")); VLOG(0) << "sucessfully allocate " << g.neighbor_size * sizeof(int64_t) << " bytes of memory for graph-edges on gpu " << resource_->dev_id(i); hipMemcpy(gpu_graph_list[i].neighbor_list, g.neighbor_list, g.neighbor_size * sizeof(int64_t), hipMemcpyHostToDevice); gpu_graph_list[i].neighbor_size = g.neighbor_size; } else { gpu_graph_list[i].neighbor_list = NULL; gpu_graph_list[i].neighbor_size = 0; } } void GpuPsGraphTable::init_sample_status() { for (int i = 0; i < gpu_num; i++) { if (gpu_graph_list[i].neighbor_size) { platform::CUDADeviceGuard guard(resource_->dev_id(i)); int* addr; hipMalloc((void**)&addr, gpu_graph_list[i].neighbor_size * sizeof(int)); hipMemset(addr, 0, gpu_graph_list[i].neighbor_size * sizeof(int)); sample_status[i] = addr; } } } void GpuPsGraphTable::free_sample_status() { for (int i = 0; i < gpu_num; i++) { if (sample_status[i] != NULL) { platform::CUDADeviceGuard guard(resource_->dev_id(i)); hipFree(sample_status[i]); } } } void GpuPsGraphTable::build_graph_from_cpu( std::vector<GpuPsCommGraph>& cpu_graph_list) { VLOG(0) << "in build_graph_from_cpu cpu_graph_list size = " << cpu_graph_list.size(); PADDLE_ENFORCE_EQ( cpu_graph_list.size(), resource_->total_device(), 
platform::errors::InvalidArgument("the cpu node list size doesn't match " "the number of gpu on your machine.")); clear_graph_info(); for (int i = 0; i < cpu_graph_list.size(); i++) { platform::CUDADeviceGuard guard(resource_->dev_id(i)); gpu_graph_list[i] = GpuPsCommGraph(); sample_status[i] = NULL; tables_[i] = new Table(::max((int64_t)1, cpu_graph_list[i].node_size) / load_factor_); if (cpu_graph_list[i].node_size > 0) { std::vector<int64_t> keys; std::vector<int64_t> offset; hipMalloc((void**)&gpu_graph_list[i].node_list, cpu_graph_list[i].node_size * sizeof(GpuPsGraphNode)); hipMemcpy(gpu_graph_list[i].node_list, cpu_graph_list[i].node_list, cpu_graph_list[i].node_size * sizeof(GpuPsGraphNode), hipMemcpyHostToDevice); for (int64_t j = 0; j < cpu_graph_list[i].node_size; j++) { keys.push_back(cpu_graph_list[i].node_list[j].node_id); offset.push_back(j); } build_ps(i, (uint64_t*)(keys.data()), offset.data(), keys.size(), 1024, 8); gpu_graph_list[i].node_size = cpu_graph_list[i].node_size; } else { build_ps(i, NULL, NULL, 0, 1024, 8); gpu_graph_list[i].node_list = NULL; gpu_graph_list[i].node_size = 0; } if (cpu_graph_list[i].neighbor_size) { hipMalloc((void**)&gpu_graph_list[i].neighbor_list, cpu_graph_list[i].neighbor_size * sizeof(int64_t)); hipMemcpy(gpu_graph_list[i].neighbor_list, cpu_graph_list[i].neighbor_list, cpu_graph_list[i].neighbor_size * sizeof(int64_t), hipMemcpyHostToDevice); gpu_graph_list[i].neighbor_size = cpu_graph_list[i].neighbor_size; } else { gpu_graph_list[i].neighbor_list = NULL; gpu_graph_list[i].neighbor_size = 0; } } hipDeviceSynchronize(); } NeighborSampleResult GpuPsGraphTable::graph_neighbor_sample_v3( NeighborSampleQuery q, bool cpu_switch) { return graph_neighbor_sample_v2(global_device_map[q.gpu_id], q.key, q.sample_size, q.len, cpu_switch); } NeighborSampleResult GpuPsGraphTable::graph_neighbor_sample(int gpu_id, int64_t* key, int sample_size, int len) { /* comment 2 this function shares some kernels with heter_comm_inl.h arguments definitions: gpu_id:the id of gpu. len:how many keys are used,(the length of array key) sample_size:how many neighbors should be sampled for each node in key. the code below shuffle the key array to make the keys that belong to a gpu-card stay together, the shuffled result is saved on d_shard_keys, if ith element in d_shard_keys_ptr is from jth element in the original key array, then idx[i] = j, idx could be used to recover the original array. 
if keys in range [a,b] belong to ith-gpu, then h_left[i] = a, h_right[i] = b, if no keys are allocated for ith-gpu, then h_left[i] == h_right[i] == -1 for example, suppose key = [0,1,2,3,4,5,6,7,8], gpu_num = 2 when we run this neighbor_sample function, the key is shuffled to [0,2,4,6,8,1,3,5,7] the first part (0,2,4,6,8) % 2 == 0,thus should be handled by gpu 0, the rest part should be handled by gpu1, because (1,3,5,7) % 2 == 1, h_left = [0,5],h_right = [4,8] */ NeighborSampleResult result; result.initialize(sample_size, len, resource_->dev_id(gpu_id)); if (len == 0) { return result; } platform::CUDAPlace place = platform::CUDAPlace(resource_->dev_id(gpu_id)); platform::CUDADeviceGuard guard(resource_->dev_id(gpu_id)); int* actual_sample_size = result.actual_sample_size; int64_t* val = result.val; int total_gpu = resource_->total_device(); auto stream = resource_->local_stream(gpu_id, 0); int grid_size = (len - 1) / block_size_ + 1; int h_left[total_gpu]; // NOLINT int h_right[total_gpu]; // NOLINT auto d_left = memory::Alloc(place, total_gpu * sizeof(int)); auto d_right = memory::Alloc(place, total_gpu * sizeof(int)); int* d_left_ptr = reinterpret_cast<int*>(d_left->ptr()); int* d_right_ptr = reinterpret_cast<int*>(d_right->ptr()); hipMemsetAsync(d_left_ptr, -1, total_gpu * sizeof(int), stream); hipMemsetAsync(d_right_ptr, -1, total_gpu * sizeof(int), stream); // auto d_idx = memory::Alloc(place, len * sizeof(int)); int* d_idx_ptr = reinterpret_cast<int*>(d_idx->ptr()); auto d_shard_keys = memory::Alloc(place, len * sizeof(int64_t)); int64_t* d_shard_keys_ptr = reinterpret_cast<int64_t*>(d_shard_keys->ptr()); auto d_shard_vals = memory::Alloc(place, sample_size * len * sizeof(int64_t)); int64_t* d_shard_vals_ptr = reinterpret_cast<int64_t*>(d_shard_vals->ptr()); auto d_shard_actual_sample_size = memory::Alloc(place, len * sizeof(int)); int* d_shard_actual_sample_size_ptr = reinterpret_cast<int*>(d_shard_actual_sample_size->ptr()); split_input_to_shard((uint64_t*)(key), d_idx_ptr, len, d_left_ptr, d_right_ptr, gpu_id); heter_comm_kernel_->fill_shard_key(d_shard_keys_ptr, key, d_idx_ptr, len, stream); hipStreamSynchronize(stream); hipMemcpy(h_left, d_left_ptr, total_gpu * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_right, d_right_ptr, total_gpu * sizeof(int), hipMemcpyDeviceToHost); // auto start1 = std::chrono::steady_clock::now(); for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } /* comment 3 shard_len denotes the size of keys on i-th gpu here, when we sample on i-th gpu, we allocate shard_len * (1 + sample_size) int64_t units of memory, we use alloc_mem_i to denote it, the range [0,shard_len) is saved for the respective nodes' indexes and acutal sample_size. with nodes' indexes we could get the nodes to sample. since size of int64_t is 8 bits, while size of int is 4, the range of [0,shard_len) contains shard_len * 2 int uinits; The values of the first half of this range will be updated by the k-v map on i-th-gpu. The second half of this range is saved for actual sample size of each node. 
For node x, its sampling result is saved on the range [shard_len + sample_size * x,shard_len + sample_size * x + actual_sample_size_of_x) of alloc_mem_i, actual_sample_size_of_x equals ((int *)alloc_mem_i)[shard_len + x] */ create_storage(gpu_id, i, shard_len * sizeof(int64_t), shard_len * (1 + sample_size) * sizeof(int64_t) + sizeof(int) * (shard_len + shard_len % 2)); // auto& node = path_[gpu_id][i].nodes_[0]; } walk_to_dest(gpu_id, total_gpu, h_left, h_right, (uint64_t*)(d_shard_keys_ptr), NULL); for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; auto& node = path_[gpu_id][i].nodes_.back(); hipMemsetAsync(node.val_storage, -1, shard_len * sizeof(int64_t), node.in_stream); hipStreamSynchronize(node.in_stream); platform::CUDADeviceGuard guard(resource_->dev_id(i)); tables_[i]->get(reinterpret_cast<uint64_t*>(node.key_storage), reinterpret_cast<int64_t*>(node.val_storage), h_right[i] - h_left[i] + 1, resource_->remote_stream(i, gpu_id)); // node.in_stream); auto graph = gpu_graph_list[i]; int64_t* id_array = reinterpret_cast<int64_t*>(node.val_storage); int* actual_size_array = (int*)(id_array + shard_len); int64_t* sample_array = (int64_t*)(actual_size_array + shard_len + shard_len % 2); int sample_grid_size = (shard_len - 1) / dim_y + 1; dim3 block(parallel_sample_size, dim_y); dim3 grid(sample_grid_size); hipLaunchKernelGGL(( neighbor_sample_example), dim3(grid), dim3(block), 0, resource_->remote_stream(i, gpu_id), graph, id_array, actual_size_array, sample_array, sample_size, sample_status[i], shard_len, gpu_id); } for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } hipStreamSynchronize(resource_->remote_stream(i, gpu_id)); } move_neighbor_sample_result_to_source_gpu(gpu_id, total_gpu, sample_size, h_left, h_right, d_shard_vals_ptr, d_shard_actual_sample_size_ptr); hipLaunchKernelGGL(( fill_dvalues), dim3(grid_size), dim3(block_size_), 0, stream, d_shard_vals_ptr, val, d_shard_actual_sample_size_ptr, actual_sample_size, d_idx_ptr, sample_size, len); for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 
0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } destroy_storage(gpu_id, i); } hipStreamSynchronize(stream); return result; } NeighborSampleResult GpuPsGraphTable::graph_neighbor_sample_v2( int gpu_id, int64_t* key, int sample_size, int len, bool cpu_query_switch) { NeighborSampleResult result; result.initialize(sample_size, len, resource_->dev_id(gpu_id)); if (len == 0) { return result; } platform::CUDAPlace place = platform::CUDAPlace(resource_->dev_id(gpu_id)); platform::CUDADeviceGuard guard(resource_->dev_id(gpu_id)); int* actual_sample_size = result.actual_sample_size; int64_t* val = result.val; int total_gpu = resource_->total_device(); auto stream = resource_->local_stream(gpu_id, 0); int grid_size = (len - 1) / block_size_ + 1; int h_left[total_gpu]; // NOLINT int h_right[total_gpu]; // NOLINT auto d_left = memory::Alloc(place, total_gpu * sizeof(int)); auto d_right = memory::Alloc(place, total_gpu * sizeof(int)); int* d_left_ptr = reinterpret_cast<int*>(d_left->ptr()); int* d_right_ptr = reinterpret_cast<int*>(d_right->ptr()); int default_value = 0; if (cpu_query_switch) { default_value = -1; } hipMemsetAsync(d_left_ptr, -1, total_gpu * sizeof(int), stream); hipMemsetAsync(d_right_ptr, -1, total_gpu * sizeof(int), stream); // auto d_idx = memory::Alloc(place, len * sizeof(int)); int* d_idx_ptr = reinterpret_cast<int*>(d_idx->ptr()); auto d_shard_keys = memory::Alloc(place, len * sizeof(int64_t)); int64_t* d_shard_keys_ptr = reinterpret_cast<int64_t*>(d_shard_keys->ptr()); auto d_shard_vals = memory::Alloc(place, sample_size * len * sizeof(int64_t)); int64_t* d_shard_vals_ptr = reinterpret_cast<int64_t*>(d_shard_vals->ptr()); auto d_shard_actual_sample_size = memory::Alloc(place, len * sizeof(int)); int* d_shard_actual_sample_size_ptr = reinterpret_cast<int*>(d_shard_actual_sample_size->ptr()); split_input_to_shard((uint64_t*)(key), d_idx_ptr, len, d_left_ptr, d_right_ptr, gpu_id); heter_comm_kernel_->fill_shard_key(d_shard_keys_ptr, key, d_idx_ptr, len, stream); hipStreamSynchronize(stream); hipMemcpy(h_left, d_left_ptr, total_gpu * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_right, d_right_ptr, total_gpu * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } create_storage(gpu_id, i, shard_len * sizeof(int64_t), shard_len * (1 + sample_size) * sizeof(int64_t) + sizeof(int) * (shard_len + shard_len % 2)); } walk_to_dest(gpu_id, total_gpu, h_left, h_right, (uint64_t*)(d_shard_keys_ptr), NULL); for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; auto& node = path_[gpu_id][i].nodes_.back(); hipMemsetAsync(node.val_storage, -1, shard_len * sizeof(int64_t), node.in_stream); hipStreamSynchronize(node.in_stream); platform::CUDADeviceGuard guard(resource_->dev_id(i)); // If not found, val is -1. 
tables_[i]->get(reinterpret_cast<uint64_t*>(node.key_storage), reinterpret_cast<int64_t*>(node.val_storage), h_right[i] - h_left[i] + 1, resource_->remote_stream(i, gpu_id)); auto graph = gpu_graph_list[i]; int64_t* id_array = reinterpret_cast<int64_t*>(node.val_storage); int* actual_size_array = (int*)(id_array + shard_len); int64_t* sample_array = (int64_t*)(actual_size_array + shard_len + shard_len % 2); constexpr int WARP_SIZE = 32; constexpr int BLOCK_WARPS = 128 / WARP_SIZE; constexpr int TILE_SIZE = BLOCK_WARPS * 16; const dim3 block(WARP_SIZE, BLOCK_WARPS); const dim3 grid((shard_len + TILE_SIZE - 1) / TILE_SIZE); hipLaunchKernelGGL(( neighbor_sample_example_v2<WARP_SIZE, BLOCK_WARPS, TILE_SIZE>) , dim3(grid), dim3(block), 0, resource_->remote_stream(i, gpu_id), graph, id_array, actual_size_array, sample_array, sample_size, shard_len, default_value); } for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } hipStreamSynchronize(resource_->remote_stream(i, gpu_id)); } move_neighbor_sample_result_to_source_gpu(gpu_id, total_gpu, sample_size, h_left, h_right, d_shard_vals_ptr, d_shard_actual_sample_size_ptr); hipLaunchKernelGGL(( fill_dvalues), dim3(grid_size), dim3(block_size_), 0, stream, d_shard_vals_ptr, val, d_shard_actual_sample_size_ptr, actual_sample_size, d_idx_ptr, sample_size, len); hipStreamSynchronize(stream); if (cpu_query_switch) { // Get cpu keys and corresponding position. thrust::device_vector<int64_t> t_cpu_keys(len); thrust::device_vector<int> t_index(len + 1, 0); hipLaunchKernelGGL(( get_cpu_id_index), dim3(grid_size), dim3(block_size_), 0, stream, key, actual_sample_size, thrust::raw_pointer_cast(t_cpu_keys.data()), thrust::raw_pointer_cast(t_index.data()), thrust::raw_pointer_cast(t_index.data()) + 1, len); hipStreamSynchronize(stream); int number_on_cpu = 0; hipMemcpy(&number_on_cpu, thrust::raw_pointer_cast(t_index.data()), sizeof(int), hipMemcpyDeviceToHost); if (number_on_cpu > 0) { int64_t* cpu_keys = new int64_t[number_on_cpu]; hipMemcpy(cpu_keys, thrust::raw_pointer_cast(t_cpu_keys.data()), number_on_cpu * sizeof(int64_t), hipMemcpyDeviceToHost); std::vector<std::shared_ptr<char>> buffers(number_on_cpu); std::vector<int> ac(number_on_cpu); auto status = cpu_graph_table->random_sample_neighbors( 0, cpu_keys, sample_size, buffers, ac, false); int total_cpu_sample_size = std::accumulate(ac.begin(), ac.end(), 0); total_cpu_sample_size /= sizeof(int64_t); // Merge buffers into one int64_t vector. int64_t* merge_buffers = new int64_t[total_cpu_sample_size]; int start = 0; for (int j = 0; j < number_on_cpu; j++) { memcpy(merge_buffers + start, (int64_t*)(buffers[j].get()), ac[j]); start += ac[j] / sizeof(int64_t); } // Copy merge_buffers to gpu. thrust::device_vector<int64_t> gpu_buffers(total_cpu_sample_size); thrust::device_vector<int> gpu_ac(number_on_cpu); int64_t* gpu_buffers_ptr = thrust::raw_pointer_cast(gpu_buffers.data()); int* gpu_ac_ptr = thrust::raw_pointer_cast(gpu_ac.data()); hipMemcpyAsync(gpu_buffers_ptr, merge_buffers, total_cpu_sample_size * sizeof(int64_t), hipMemcpyHostToDevice, stream); hipMemcpyAsync(gpu_ac_ptr, ac.data(), number_on_cpu * sizeof(int), hipMemcpyHostToDevice, stream); // Copy gpu_buffers and gpu_ac using kernel. // Kernel divide for gpu_ac_ptr. 
int grid_size2 = (number_on_cpu - 1) / block_size_ + 1; hipLaunchKernelGGL(( get_actual_gpu_ac), dim3(grid_size2), dim3(block_size_), 0, stream, gpu_ac_ptr, number_on_cpu); hipStreamSynchronize(stream); thrust::device_vector<int> cumsum_gpu_ac(number_on_cpu); thrust::exclusive_scan(gpu_ac.begin(), gpu_ac.end(), cumsum_gpu_ac.begin(), 0); constexpr int WARP_SIZE_ = 32; constexpr int BLOCK_WARPS_ = 128 / WARP_SIZE_; constexpr int TILE_SIZE_ = BLOCK_WARPS_ * 16; const dim3 block2(WARP_SIZE_, BLOCK_WARPS_); const dim3 grid2((number_on_cpu + TILE_SIZE_ - 1) / TILE_SIZE_); hipLaunchKernelGGL(( copy_buffer_ac_to_final_place<WARP_SIZE_, BLOCK_WARPS_, TILE_SIZE_>) , dim3(grid2), dim3(block2), 0, stream, gpu_buffers_ptr, gpu_ac_ptr, val, actual_sample_size, thrust::raw_pointer_cast(t_index.data()) + 1, thrust::raw_pointer_cast(cumsum_gpu_ac.data()), number_on_cpu, sample_size); delete[] merge_buffers; delete[] cpu_keys; } } { hipStreamSynchronize(stream); platform::CUDAPlace place = platform::CUDAPlace(resource_->dev_id(gpu_id)); platform::CUDADeviceGuard guard(resource_->dev_id(gpu_id)); thrust::device_vector<int> t_actual_sample_size(len); thrust::copy(actual_sample_size, actual_sample_size + len, t_actual_sample_size.begin()); int total_sample_size = thrust::reduce(t_actual_sample_size.begin(), t_actual_sample_size.end()); result.actual_val_mem = memory::AllocShared(place, total_sample_size * sizeof(int64_t)); result.actual_val = (int64_t*)(result.actual_val_mem)->ptr(); result.set_total_sample_size(total_sample_size); thrust::device_vector<int> cumsum_actual_sample_size(len); thrust::exclusive_scan(t_actual_sample_size.begin(), t_actual_sample_size.end(), cumsum_actual_sample_size.begin(), 0); hipLaunchKernelGGL(( fill_actual_vals), dim3(grid_size), dim3(block_size_), 0, stream, val, result.actual_val, actual_sample_size, thrust::raw_pointer_cast(cumsum_actual_sample_size.data()), sample_size, len); } for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 
0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } destroy_storage(gpu_id, i); } hipStreamSynchronize(stream); return result; } NodeQueryResult GpuPsGraphTable::graph_node_sample(int gpu_id, int sample_size) { return NodeQueryResult(); } NodeQueryResult GpuPsGraphTable::query_node_list(int gpu_id, int start, int query_size) { NodeQueryResult result; if (query_size <= 0) return result; int& actual_size = result.actual_sample_size; actual_size = 0; // int dev_id = resource_->dev_id(gpu_id); // platform::CUDADeviceGuard guard(dev_id); std::vector<int> idx, gpu_begin_pos, local_begin_pos; int sample_size; /* if idx[i] = a, gpu_begin_pos[i] = p1, gpu_local_begin_pos[i] = p2; sample_size[i] = s; then on gpu a, the nodes of positions [p1,p1 + s) should be returned and saved from the p2 position on the sample_result array for example: suppose gpu 0 saves [0,2,4,6,8], gpu1 saves [1,3,5,7] start = 3, query_size = 5 we know [6,8,1,3,5] should be returned; idx = [0,1] gpu_begin_pos = [3,0] local_begin_pos = [0,3] sample_size = [2,3] */ std::function<int(int, int, int, int, int&, int&)> range_check = [](int x, int y, int x1, int y1, int& x2, int& y2) { if (y <= x1 || x >= y1) return 0; y2 = min(y, y1); x2 = max(x1, x); return y2 - x2; }; auto graph = gpu_graph_list[gpu_id]; if (graph.node_size == 0) { return result; } int x2, y2; int len = range_check(start, start + query_size, 0, graph.node_size, x2, y2); if (len == 0) { return result; } int64_t* val; sample_size = len; result.initialize(len, resource_->dev_id(gpu_id)); actual_size = len; val = result.val; int dev_id_i = resource_->dev_id(gpu_id); platform::CUDADeviceGuard guard(dev_id_i); // platform::CUDADeviceGuard guard(i); int grid_size = (len - 1) / block_size_ + 1; hipLaunchKernelGGL(( node_query_example), dim3(grid_size), dim3(block_size_), 0, resource_->remote_stream(gpu_id, gpu_id), gpu_graph_list[gpu_id], x2, len, (int64_t*)val); hipStreamSynchronize(resource_->remote_stream(gpu_id, gpu_id)); return result; /* for (int i = 0; i < gpu_graph_list.size() && query_size != 0; i++) { auto graph = gpu_graph_list[i]; if (graph.node_size == 0) { continue; } int x2, y2; int len = range_check(start, start + query_size, size, size + graph.node_size, x2, y2); if (len > 0) { idx.push_back(i); gpu_begin_pos.emplace_back(x2 - size); local_begin_pos.emplace_back(actual_size); sample_size.push_back(len); actual_size += len; create_storage(gpu_id, i, 1, len * sizeof(int64_t)); } size += graph.node_size; } for (int i = 0; i < idx.size(); i++) { int dev_id_i = resource_->dev_id(idx[i]); platform::CUDADeviceGuard guard(dev_id_i); // platform::CUDADeviceGuard guard(i); auto& node = path_[gpu_id][idx[i]].nodes_.front(); int grid_size = (sample_size[i] - 1) / block_size_ + 1; node_query_example<<<grid_size, block_size_, 0, resource_->remote_stream(idx[i], gpu_id)>>>( gpu_graph_list[idx[i]], gpu_begin_pos[i], sample_size[i], (int64_t*)node.val_storage); } for (int i = 0; i < idx.size(); i++) { hipStreamSynchronize(resource_->remote_stream(idx[i], gpu_id)); auto& node = path_[gpu_id][idx[i]].nodes_.front(); hipMemcpyAsync(reinterpret_cast<char*>(val + local_begin_pos[i]), node.val_storage, node.val_bytes_len, hipMemcpyDefault, node.out_stream); } for (int i = 0; i < idx.size(); i++) { auto& node = path_[gpu_id][idx[i]].nodes_.front(); hipStreamSynchronize(node.out_stream); } for (auto x : idx) { destroy_storage(gpu_id, x); } return result; */ } } // namespace framework }; // namespace paddle #endif
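Comments 1 and 2 above describe how the key array is regrouped per destination GPU (h_left/h_right mark each GPU's contiguous range) and how the idx array later restores the original order. The plain C++ sketch below is not the library's split_input_to_shard / fill_shard_key / fill_dvalues path, only a host-side restatement of the same bookkeeping using the worked example from comment 2 (keys 0..8 on 2 GPUs, partitioned by key % gpu_num).

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const int gpu_num = 2;
    std::vector<int64_t> key = {0, 1, 2, 3, 4, 5, 6, 7, 8};

    std::vector<int64_t> shuffled;   // keys regrouped so each gpu's keys are contiguous
    std::vector<int> idx;            // idx[i] = position of shuffled[i] in the original array
    std::vector<int> h_left(gpu_num, -1), h_right(gpu_num, -1);

    for (int g = 0; g < gpu_num; ++g) {
        for (int j = 0; j < (int)key.size(); ++j) {
            if (key[j] % gpu_num != g) continue;        // key % gpu_num decides the owning gpu
            if (h_left[g] == -1) h_left[g] = (int)shuffled.size();
            h_right[g] = (int)shuffled.size();
            shuffled.push_back(key[j]);
            idx.push_back(j);
        }
    }

    // With the keys above this prints h_left = [0,5] and h_right = [4,8],
    // matching the values quoted in comment 2.
    for (int g = 0; g < gpu_num; ++g)
        printf("gpu %d: h_left = %d, h_right = %d\n", g, h_left[g], h_right[g]);

    // Restoring the original order with idx, the way fill_dvalues scatters the
    // per-key results back: original_order[idx[i]] = shuffled[i].
    std::vector<int64_t> restored(key.size());
    for (size_t i = 0; i < shuffled.size(); ++i)
        restored[idx[i]] = shuffled[i];
    return 0;
}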
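Comment 3 above spells out the layout of each per-shard value buffer: node indexes first, then the actual sample sizes padded to an even count, then the sampled neighbor ids. The helper below only restates that arithmetic on the host; the sizes match the create_storage call and the id_array / actual_size_array / sample_array pointer casts in graph_neighbor_sample, while the shard_len and sample_size values in main are hypothetical.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Region sizes inside one shard's value buffer, as described in comment 3.
struct ShardLayout {
    size_t index_bytes;   // shard_len node indexes (int64_t), filled by the k-v lookup
    size_t size_bytes;    // shard_len actual sample sizes (int), padded to an even count
                          // so the following int64_t region stays 8-byte aligned
    size_t sample_bytes;  // shard_len * sample_size sampled neighbor ids (int64_t)
    size_t total_bytes;   // what create_storage reserves for the value side
};

ShardLayout shard_layout(int shard_len, int sample_size)
{
    ShardLayout l;
    l.index_bytes  = sizeof(int64_t) * (size_t)shard_len;
    l.size_bytes   = sizeof(int) * (size_t)(shard_len + shard_len % 2);
    l.sample_bytes = sizeof(int64_t) * (size_t)shard_len * (size_t)sample_size;
    l.total_bytes  = l.index_bytes + l.size_bytes + l.sample_bytes;
    return l;
}

int main()
{
    // Hypothetical shard: 5 keys, 3 samples per key.
    ShardLayout l = shard_layout(5, 3);
    printf("indexes %zu B, sizes %zu B, samples %zu B, total %zu B\n",
           l.index_bytes, l.size_bytes, l.sample_bytes, l.total_bytes);
    return 0;
}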
acdac74933f8e29b40df88f05c599f93fe79214c.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <functional> #pragma once #ifdef PADDLE_WITH_HETERPS #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" namespace paddle { namespace framework { /* comment 0 this kernel just serves as an example of how to sample nodes' neighbors. feel free to modify it index[0,len) saves the nodes' index actual_size[0,len) is to save the sample size of each node. for ith node in index, actual_size[i] = min(node i's neighbor size, sample size) sample_result is to save the neighbor sampling result, its size is len * sample_size; */ __global__ void get_cpu_id_index(int64_t* key, int* actual_sample_size, int64_t* cpu_key, int* sum, int* index, int len) { CUDA_KERNEL_LOOP(i, len) { if (actual_sample_size[i] == -1) { int old = atomicAdd(sum, 1); cpu_key[old] = key[i]; index[old] = i; // printf("old %d i-%d key:%lld\n",old,i,key[i]); } } } __global__ void get_actual_gpu_ac(int* gpu_ac, int number_on_cpu) { CUDA_KERNEL_LOOP(i, number_on_cpu) { gpu_ac[i] /= sizeof(int64_t); } } template <int WARP_SIZE, int BLOCK_WARPS, int TILE_SIZE> __global__ void copy_buffer_ac_to_final_place( int64_t* gpu_buffer, int* gpu_ac, int64_t* val, int* actual_sample_size, int* index, int* cumsum_gpu_ac, int number_on_cpu, int sample_size) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BLOCK_WARPS); int i = blockIdx.x * TILE_SIZE + threadIdx.y; const int last_idx = min(static_cast<int>(blockIdx.x + 1) * TILE_SIZE, number_on_cpu); while (i < last_idx) { actual_sample_size[index[i]] = gpu_ac[i]; for (int j = threadIdx.x; j < gpu_ac[i]; j += WARP_SIZE) { val[index[i] * sample_size + j] = gpu_buffer[cumsum_gpu_ac[i] + j]; } i += BLOCK_WARPS; } } template <int WARP_SIZE, int BLOCK_WARPS, int TILE_SIZE> __global__ void neighbor_sample_example_v2(GpuPsCommGraph graph, int64_t* node_index, int* actual_size, int64_t* res, int sample_len, int n, int default_value) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BLOCK_WARPS); int i = blockIdx.x * TILE_SIZE + threadIdx.y; const int last_idx = min(static_cast<int>(blockIdx.x + 1) * TILE_SIZE, n); curandState rng; curand_init(blockIdx.x, threadIdx.y * WARP_SIZE + threadIdx.x, 0, &rng); while (i < last_idx) { if (node_index[i] == -1) { actual_size[i] = default_value; i += BLOCK_WARPS; continue; } int neighbor_len = (int)graph.node_list[node_index[i]].neighbor_size; int64_t data_offset = graph.node_list[node_index[i]].neighbor_offset; int offset = i * sample_len; int64_t* data = graph.neighbor_list; if (neighbor_len <= sample_len) { for (int j = threadIdx.x; j < neighbor_len; j += WARP_SIZE) { res[offset + j] = data[data_offset + j]; } actual_size[i] = neighbor_len; } else { for (int j = threadIdx.x; j < sample_len; j += WARP_SIZE) { res[offset + j] = j; } __syncwarp(); for (int j = sample_len + threadIdx.x; j < neighbor_len; j += WARP_SIZE) { const int num = 
curand(&rng) % (j + 1); if (num < sample_len) { atomicMax(reinterpret_cast<unsigned int*>(res + offset + num), static_cast<unsigned int>(j)); } } __syncwarp(); for (int j = threadIdx.x; j < sample_len; j += WARP_SIZE) { const int64_t perm_idx = res[offset + j] + data_offset; res[offset + j] = data[perm_idx]; } actual_size[i] = sample_len; } i += BLOCK_WARPS; } } __global__ void neighbor_sample_example(GpuPsCommGraph graph, int64_t* node_index, int* actual_size, int64_t* res, int sample_len, int* sample_status, int n, int from) { int id = blockIdx.x * blockDim.y + threadIdx.y; if (id < n) { if (node_index[id] == -1) { actual_size[id] = 0; return; } curandState rng; curand_init(blockIdx.x, threadIdx.x, threadIdx.y, &rng); int64_t index = threadIdx.x; int64_t offset = id * sample_len; int64_t* data = graph.neighbor_list; int64_t data_offset = graph.node_list[node_index[id]].neighbor_offset; int64_t neighbor_len = graph.node_list[node_index[id]].neighbor_size; int ac_len; if (sample_len > neighbor_len) ac_len = neighbor_len; else { ac_len = sample_len; } if (4 * ac_len >= 3 * neighbor_len) { if (index == 0) { res[offset] = curand(&rng) % (neighbor_len - ac_len + 1); } __syncwarp(); int start = res[offset]; while (index < ac_len) { res[offset + index] = data[data_offset + start + index]; index += blockDim.x; } actual_size[id] = ac_len; } else { while (index < ac_len) { int num = curand(&rng) % neighbor_len; int* addr = sample_status + data_offset + num; int expected = *addr; if (!(expected & (1 << from))) { int old = atomicCAS(addr, expected, expected | (1 << from)); if (old == expected) { res[offset + index] = num; index += blockDim.x; } } } __syncwarp(); index = threadIdx.x; while (index < ac_len) { int* addr = sample_status + data_offset + res[offset + index]; int expected, old = *addr; do { expected = old; old = atomicCAS(addr, expected, expected & (~(1 << from))); } while (old != expected); res[offset + index] = data[data_offset + res[offset + index]]; index += blockDim.x; } actual_size[id] = ac_len; } } // const size_t i = blockIdx.x * blockDim.x + threadIdx.x; // if (i < n) { // auto node_index = index[i]; // actual_size[i] = graph.node_list[node_index].neighbor_size < sample_size // ? 
graph.node_list[node_index].neighbor_size // : sample_size; // int offset = graph.node_list[node_index].neighbor_offset; // for (int j = 0; j < actual_size[i]; j++) { // sample_result[sample_size * i + j] = graph.neighbor_list[offset + j]; // } // } } int GpuPsGraphTable::init_cpu_table( const paddle::distributed::GraphParameter& graph) { cpu_graph_table.reset(new paddle::distributed::GraphTable); cpu_table_status = cpu_graph_table->Initialize(graph); // if (cpu_table_status != 0) return cpu_table_status; // std::function<void(std::vector<GpuPsCommGraph>&)> callback = // [this](std::vector<GpuPsCommGraph>& res) { // pthread_rwlock_wrlock(this->rw_lock.get()); // this->clear_graph_info(); // this->build_graph_from_cpu(res); // pthread_rwlock_unlock(this->rw_lock.get()); // cv_.notify_one(); // }; // cpu_graph_table->set_graph_sample_callback(callback); return cpu_table_status; } // int GpuPsGraphTable::load(const std::string& path, const std::string& param) // { // int status = cpu_graph_table->load(path, param); // if (status != 0) { // return status; // } // std::unique_lock<std::mutex> lock(mutex_); // cpu_graph_table->start_graph_sampling(); // cv_.wait(lock); // return 0; // } /* comment 1 gpu i triggers a neighbor_sample task, when this task is done, this function is called to move the sample result on other gpu back to gup i and aggragate the result. the sample_result is saved on src_sample_res and the actual sample size for each node is saved on actual_sample_size. the number of actual sample_result for key[x] (refer to comment 2 for definition of key) is saved on actual_sample_size[x], since the neighbor size of key[x] might be smaller than sample_size, is saved on src_sample_res [x*sample_size, x*sample_size + actual_sample_size[x]) since before each gpu runs the neighbor_sample task,the key array is shuffled, but we have the idx array to save the original order. when the gpu i gets all the sample results from other gpus, it relies on idx array to recover the original order. that's what fill_dvals does. 
*/ void GpuPsGraphTable::display_sample_res(void* key, void* val, int len, int sample_len) { char key_buffer[len * sizeof(int64_t)]; char val_buffer[sample_len * sizeof(int64_t) * len + (len + len % 2) * sizeof(int) + len * sizeof(int64_t)]; cudaMemcpy(key_buffer, key, sizeof(int64_t) * len, cudaMemcpyDeviceToHost); cudaMemcpy(val_buffer, val, sample_len * sizeof(int64_t) * len + (len + len % 2) * sizeof(int) + len * sizeof(int64_t), cudaMemcpyDeviceToHost); int64_t* sample_val = (int64_t*)(val_buffer + (len + len % 2) * sizeof(int) + len * sizeof(int64_t)); for (int i = 0; i < len; i++) { printf("key %lld\n", *(int64_t*)(key_buffer + i * sizeof(int64_t))); printf("index %lld\n", *(int64_t*)(val_buffer + i * sizeof(int64_t))); int ac_size = *(int*)(val_buffer + i * sizeof(int) + len * sizeof(int64_t)); printf("sampled %d neigbhors\n", ac_size); for (int j = 0; j < ac_size; j++) { printf("%lld ", sample_val[i * sample_len + j]); } printf("\n"); } } void GpuPsGraphTable::move_neighbor_sample_result_to_source_gpu( int start_index, int gpu_num, int sample_size, int* h_left, int* h_right, int64_t* src_sample_res, int* actual_sample_size) { int shard_len[gpu_num]; for (int i = 0; i < gpu_num; i++) { if (h_left[i] == -1 || h_right[i] == -1) { continue; } shard_len[i] = h_right[i] - h_left[i] + 1; int cur_step = (int)path_[start_index][i].nodes_.size() - 1; for (int j = cur_step; j > 0; j--) { cudaMemcpyAsync(path_[start_index][i].nodes_[j - 1].val_storage, path_[start_index][i].nodes_[j].val_storage, path_[start_index][i].nodes_[j - 1].val_bytes_len, cudaMemcpyDefault, path_[start_index][i].nodes_[j - 1].out_stream); } auto& node = path_[start_index][i].nodes_.front(); cudaMemcpyAsync( reinterpret_cast<char*>(src_sample_res + h_left[i] * sample_size), node.val_storage + sizeof(int64_t) * shard_len[i] + sizeof(int) * (shard_len[i] + shard_len[i] % 2), sizeof(int64_t) * shard_len[i] * sample_size, cudaMemcpyDefault, node.out_stream); cudaMemcpyAsync(reinterpret_cast<char*>(actual_sample_size + h_left[i]), node.val_storage + sizeof(int64_t) * shard_len[i], sizeof(int) * shard_len[i], cudaMemcpyDefault, node.out_stream); } for (int i = 0; i < gpu_num; ++i) { if (h_left[i] == -1 || h_right[i] == -1) { continue; } auto& node = path_[start_index][i].nodes_.front(); cudaStreamSynchronize(node.out_stream); // cudaStreamSynchronize(resource_->remote_stream(i, start_index)); } /* std::queue<CopyTask> que; // auto& node = path_[gpu_id][i].nodes_.front(); // cudaMemcpyAsync( // reinterpret_cast<char*>(src_sample_res + h_left[i] * sample_size), // node.val_storage + sizeof(int64_t) * shard_len, // node.val_bytes_len - sizeof(int64_t) * shard_len, cudaMemcpyDefault, // node.out_stream); // cudaMemcpyAsync(reinterpret_cast<char*>(actual_sample_size + h_left[i]), // node.val_storage + sizeof(int) * shard_len, // sizeof(int) * shard_len, cudaMemcpyDefault, // node.out_stream); int cur_step = path_[start_index][i].nodes_.size() - 1; auto& node = path_[start_index][i].nodes_[cur_step]; if (cur_step == 0) { // cudaMemcpyAsync(reinterpret_cast<char*>(src_val + h_left[i]), // node.val_storage, node.val_bytes_len, // cudaMemcpyDefault, // node.out_stream); // VLOG(0)<<"copy "<<node.gpu_num<<" to "<<start_index; cudaMemcpyAsync( reinterpret_cast<char*>(src_sample_res + h_left[i] * sample_size), node.val_storage + sizeof(int64_t) * shard_len[i], node.val_bytes_len - sizeof(int64_t) * shard_len[i], cudaMemcpyDefault, node.out_stream); //resource_->remote_stream(i, start_index)); 
cudaMemcpyAsync(reinterpret_cast<char*>(actual_sample_size + h_left[i]), node.val_storage + sizeof(int) * shard_len[i], sizeof(int) * shard_len[i], cudaMemcpyDefault, node.out_stream); //resource_->remote_stream(i, start_index)); } else { CopyTask t(&path_[start_index][i], cur_step - 1); que.push(t); // VLOG(0)<<"copy "<<node.gpu_num<<" to "<<path_[start_index][i].nodes_[cur_step - 1].gpu_num; cudaMemcpyAsync(path_[start_index][i].nodes_[cur_step - 1].val_storage, node.val_storage, path_[start_index][i].nodes_[cur_step - 1].val_bytes_len, cudaMemcpyDefault, path_[start_index][i].nodes_[cur_step - 1].out_stream); //resource_->remote_stream(i, start_index)); } } while (!que.empty()) { CopyTask& cur_task = que.front(); que.pop(); int cur_step = cur_task.step; if (cur_task.path->nodes_[cur_step].sync) { cudaStreamSynchronize(cur_task.path->nodes_[cur_step].out_stream); //cudaStreamSynchronize(resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); } if (cur_step > 0) { CopyTask c(cur_task.path, cur_step - 1); que.push(c); cudaMemcpyAsync(cur_task.path->nodes_[cur_step - 1].val_storage, cur_task.path->nodes_[cur_step].val_storage, cur_task.path->nodes_[cur_step - 1].val_bytes_len, cudaMemcpyDefault, cur_task.path->nodes_[cur_step - 1].out_stream); //resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); } else if (cur_step == 0) { int end_index = cur_task.path->nodes_.back().gpu_num; // cudaMemcpyAsync(reinterpret_cast<char*>(src_val + h_left[end_index]), // cur_task.path->nodes_[cur_step].val_storage, // cur_task.path->nodes_[cur_step].val_bytes_len, // cudaMemcpyDefault, // cur_task.path->nodes_[cur_step].out_stream); //VLOG(0)<<"copy "<<cur_task.path->nodes_[cur_step].gpu_num<< " to "<<start_index; cudaMemcpyAsync(reinterpret_cast<char*>(src_sample_res + h_left[end_index] * sample_size), cur_task.path->nodes_[cur_step].val_storage + sizeof(int64_t) * shard_len[end_index], cur_task.path->nodes_[cur_step].val_bytes_len - sizeof(int64_t) * shard_len[end_index], cudaMemcpyDefault, cur_task.path->nodes_[cur_step].out_stream); //resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); cudaMemcpyAsync( reinterpret_cast<char*>(actual_sample_size + h_left[end_index]), cur_task.path->nodes_[cur_step].val_storage + sizeof(int) * shard_len[end_index], sizeof(int) * shard_len[end_index], cudaMemcpyDefault, cur_task.path->nodes_[cur_step].out_stream); //resource_->remote_stream(cur_task.path->nodes_.back().gpu_num, start_index)); } } for (int i = 0; i < gpu_num; ++i) { if (h_left[i] == -1 || h_right[i] == -1) { continue; } auto& node = path_[start_index][i].nodes_.front(); cudaStreamSynchronize(node.out_stream); //cudaStreamSynchronize(resource_->remote_stream(i, start_index)); } */ } /* TODO: how to optimize it to eliminate the for loop */ __global__ void fill_dvalues(int64_t* d_shard_vals, int64_t* d_vals, int* d_shard_actual_sample_size, int* d_actual_sample_size, int* idx, int sample_size, int len) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { d_actual_sample_size[idx[i]] = d_shard_actual_sample_size[i]; for (int j = 0; j < sample_size; j++) { d_vals[idx[i] * sample_size + j] = d_shard_vals[i * sample_size + j]; } } } __global__ void fill_actual_vals(int64_t* vals, int64_t* actual_vals, int* actual_sample_size, int* cumsum_actual_sample_size, int sample_size, int len) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { for (int j = 0; j < actual_sample_size[i]; j++) { 
actual_vals[cumsum_actual_sample_size[i] + j] = vals[sample_size * i + j]; } } } __global__ void node_query_example(GpuPsCommGraph graph, int start, int size, int64_t* res) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { res[i] = graph.node_list[start + i].node_id; } } void GpuPsGraphTable::clear_graph_info(int gpu_id) { if (tables_.size() && tables_[gpu_id] != NULL) { delete tables_[gpu_id]; } auto& graph = gpu_graph_list[gpu_id]; if (graph.neighbor_list != NULL) { cudaFree(graph.neighbor_list); } if (graph.node_list != NULL) { cudaFree(graph.node_list); } } void GpuPsGraphTable::clear_graph_info() { if (tables_.size()) { for (auto table : tables_) delete table; } tables_.clear(); for (auto graph : gpu_graph_list) { if (graph.neighbor_list != NULL) { cudaFree(graph.neighbor_list); } if (graph.node_list != NULL) { cudaFree(graph.node_list); } } gpu_graph_list.clear(); } /* the parameter std::vector<GpuPsCommGraph> cpu_graph_list is generated by cpu. it saves the graph to be saved on each gpu. for the ith GpuPsCommGraph, any the node's key satisfies that key % gpu_number == i In this function, memory is allocated on each gpu to save the graphs, gpu i saves the ith graph from cpu_graph_list */ void GpuPsGraphTable::build_graph_on_single_gpu(GpuPsCommGraph& g, int i) { clear_graph_info(i); platform::CUDADeviceGuard guard(resource_->dev_id(i)); // platform::CUDADeviceGuard guard(i); gpu_graph_list[i] = GpuPsCommGraph(); sample_status[i] = NULL; tables_[i] = new Table(std::max((int64_t)1, g.node_size) / load_factor_); if (g.node_size > 0) { std::vector<int64_t> keys; std::vector<int64_t> offset; cudaMalloc((void**)&gpu_graph_list[i].node_list, g.node_size * sizeof(GpuPsGraphNode)); cudaMemcpy(gpu_graph_list[i].node_list, g.node_list, g.node_size * sizeof(GpuPsGraphNode), cudaMemcpyHostToDevice); for (int64_t j = 0; j < g.node_size; j++) { keys.push_back(g.node_list[j].node_id); offset.push_back(j); } build_ps(i, (uint64_t*)keys.data(), offset.data(), keys.size(), 1024, 8); gpu_graph_list[i].node_size = g.node_size; } else { build_ps(i, NULL, NULL, 0, 1024, 8); gpu_graph_list[i].node_list = NULL; gpu_graph_list[i].node_size = 0; } if (g.neighbor_size) { cudaError_t cudaStatus = cudaMalloc((void**)&gpu_graph_list[i].neighbor_list, g.neighbor_size * sizeof(int64_t)); PADDLE_ENFORCE_EQ(cudaStatus, cudaSuccess, platform::errors::InvalidArgument( "ailed to allocate memory for graph on gpu ")); VLOG(0) << "sucessfully allocate " << g.neighbor_size * sizeof(int64_t) << " bytes of memory for graph-edges on gpu " << resource_->dev_id(i); cudaMemcpy(gpu_graph_list[i].neighbor_list, g.neighbor_list, g.neighbor_size * sizeof(int64_t), cudaMemcpyHostToDevice); gpu_graph_list[i].neighbor_size = g.neighbor_size; } else { gpu_graph_list[i].neighbor_list = NULL; gpu_graph_list[i].neighbor_size = 0; } } void GpuPsGraphTable::init_sample_status() { for (int i = 0; i < gpu_num; i++) { if (gpu_graph_list[i].neighbor_size) { platform::CUDADeviceGuard guard(resource_->dev_id(i)); int* addr; cudaMalloc((void**)&addr, gpu_graph_list[i].neighbor_size * sizeof(int)); cudaMemset(addr, 0, gpu_graph_list[i].neighbor_size * sizeof(int)); sample_status[i] = addr; } } } void GpuPsGraphTable::free_sample_status() { for (int i = 0; i < gpu_num; i++) { if (sample_status[i] != NULL) { platform::CUDADeviceGuard guard(resource_->dev_id(i)); cudaFree(sample_status[i]); } } } void GpuPsGraphTable::build_graph_from_cpu( std::vector<GpuPsCommGraph>& cpu_graph_list) { VLOG(0) << "in build_graph_from_cpu 
cpu_graph_list size = " << cpu_graph_list.size(); PADDLE_ENFORCE_EQ( cpu_graph_list.size(), resource_->total_device(), platform::errors::InvalidArgument("the cpu node list size doesn't match " "the number of gpu on your machine.")); clear_graph_info(); for (int i = 0; i < cpu_graph_list.size(); i++) { platform::CUDADeviceGuard guard(resource_->dev_id(i)); gpu_graph_list[i] = GpuPsCommGraph(); sample_status[i] = NULL; tables_[i] = new Table(std::max((int64_t)1, cpu_graph_list[i].node_size) / load_factor_); if (cpu_graph_list[i].node_size > 0) { std::vector<int64_t> keys; std::vector<int64_t> offset; cudaMalloc((void**)&gpu_graph_list[i].node_list, cpu_graph_list[i].node_size * sizeof(GpuPsGraphNode)); cudaMemcpy(gpu_graph_list[i].node_list, cpu_graph_list[i].node_list, cpu_graph_list[i].node_size * sizeof(GpuPsGraphNode), cudaMemcpyHostToDevice); for (int64_t j = 0; j < cpu_graph_list[i].node_size; j++) { keys.push_back(cpu_graph_list[i].node_list[j].node_id); offset.push_back(j); } build_ps(i, (uint64_t*)(keys.data()), offset.data(), keys.size(), 1024, 8); gpu_graph_list[i].node_size = cpu_graph_list[i].node_size; } else { build_ps(i, NULL, NULL, 0, 1024, 8); gpu_graph_list[i].node_list = NULL; gpu_graph_list[i].node_size = 0; } if (cpu_graph_list[i].neighbor_size) { cudaMalloc((void**)&gpu_graph_list[i].neighbor_list, cpu_graph_list[i].neighbor_size * sizeof(int64_t)); cudaMemcpy(gpu_graph_list[i].neighbor_list, cpu_graph_list[i].neighbor_list, cpu_graph_list[i].neighbor_size * sizeof(int64_t), cudaMemcpyHostToDevice); gpu_graph_list[i].neighbor_size = cpu_graph_list[i].neighbor_size; } else { gpu_graph_list[i].neighbor_list = NULL; gpu_graph_list[i].neighbor_size = 0; } } cudaDeviceSynchronize(); } NeighborSampleResult GpuPsGraphTable::graph_neighbor_sample_v3( NeighborSampleQuery q, bool cpu_switch) { return graph_neighbor_sample_v2(global_device_map[q.gpu_id], q.key, q.sample_size, q.len, cpu_switch); } NeighborSampleResult GpuPsGraphTable::graph_neighbor_sample(int gpu_id, int64_t* key, int sample_size, int len) { /* comment 2 this function shares some kernels with heter_comm_inl.h arguments definitions: gpu_id:the id of gpu. len:how many keys are used,(the length of array key) sample_size:how many neighbors should be sampled for each node in key. the code below shuffle the key array to make the keys that belong to a gpu-card stay together, the shuffled result is saved on d_shard_keys, if ith element in d_shard_keys_ptr is from jth element in the original key array, then idx[i] = j, idx could be used to recover the original array. 
if keys in range [a,b] belong to ith-gpu, then h_left[i] = a, h_right[i] = b, if no keys are allocated for ith-gpu, then h_left[i] == h_right[i] == -1 for example, suppose key = [0,1,2,3,4,5,6,7,8], gpu_num = 2 when we run this neighbor_sample function, the key is shuffled to [0,2,4,6,8,1,3,5,7] the first part (0,2,4,6,8) % 2 == 0,thus should be handled by gpu 0, the rest part should be handled by gpu1, because (1,3,5,7) % 2 == 1, h_left = [0,5],h_right = [4,8] */ NeighborSampleResult result; result.initialize(sample_size, len, resource_->dev_id(gpu_id)); if (len == 0) { return result; } platform::CUDAPlace place = platform::CUDAPlace(resource_->dev_id(gpu_id)); platform::CUDADeviceGuard guard(resource_->dev_id(gpu_id)); int* actual_sample_size = result.actual_sample_size; int64_t* val = result.val; int total_gpu = resource_->total_device(); auto stream = resource_->local_stream(gpu_id, 0); int grid_size = (len - 1) / block_size_ + 1; int h_left[total_gpu]; // NOLINT int h_right[total_gpu]; // NOLINT auto d_left = memory::Alloc(place, total_gpu * sizeof(int)); auto d_right = memory::Alloc(place, total_gpu * sizeof(int)); int* d_left_ptr = reinterpret_cast<int*>(d_left->ptr()); int* d_right_ptr = reinterpret_cast<int*>(d_right->ptr()); cudaMemsetAsync(d_left_ptr, -1, total_gpu * sizeof(int), stream); cudaMemsetAsync(d_right_ptr, -1, total_gpu * sizeof(int), stream); // auto d_idx = memory::Alloc(place, len * sizeof(int)); int* d_idx_ptr = reinterpret_cast<int*>(d_idx->ptr()); auto d_shard_keys = memory::Alloc(place, len * sizeof(int64_t)); int64_t* d_shard_keys_ptr = reinterpret_cast<int64_t*>(d_shard_keys->ptr()); auto d_shard_vals = memory::Alloc(place, sample_size * len * sizeof(int64_t)); int64_t* d_shard_vals_ptr = reinterpret_cast<int64_t*>(d_shard_vals->ptr()); auto d_shard_actual_sample_size = memory::Alloc(place, len * sizeof(int)); int* d_shard_actual_sample_size_ptr = reinterpret_cast<int*>(d_shard_actual_sample_size->ptr()); split_input_to_shard((uint64_t*)(key), d_idx_ptr, len, d_left_ptr, d_right_ptr, gpu_id); heter_comm_kernel_->fill_shard_key(d_shard_keys_ptr, key, d_idx_ptr, len, stream); cudaStreamSynchronize(stream); cudaMemcpy(h_left, d_left_ptr, total_gpu * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_right, d_right_ptr, total_gpu * sizeof(int), cudaMemcpyDeviceToHost); // auto start1 = std::chrono::steady_clock::now(); for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } /* comment 3 shard_len denotes the size of keys on i-th gpu here, when we sample on i-th gpu, we allocate shard_len * (1 + sample_size) int64_t units of memory, we use alloc_mem_i to denote it, the range [0,shard_len) is saved for the respective nodes' indexes and acutal sample_size. with nodes' indexes we could get the nodes to sample. since size of int64_t is 8 bits, while size of int is 4, the range of [0,shard_len) contains shard_len * 2 int uinits; The values of the first half of this range will be updated by the k-v map on i-th-gpu. The second half of this range is saved for actual sample size of each node. 
For node x, its sampling result is saved on the range [shard_len + sample_size * x,shard_len + sample_size * x + actual_sample_size_of_x) of alloc_mem_i, actual_sample_size_of_x equals ((int *)alloc_mem_i)[shard_len + x] */ create_storage(gpu_id, i, shard_len * sizeof(int64_t), shard_len * (1 + sample_size) * sizeof(int64_t) + sizeof(int) * (shard_len + shard_len % 2)); // auto& node = path_[gpu_id][i].nodes_[0]; } walk_to_dest(gpu_id, total_gpu, h_left, h_right, (uint64_t*)(d_shard_keys_ptr), NULL); for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; auto& node = path_[gpu_id][i].nodes_.back(); cudaMemsetAsync(node.val_storage, -1, shard_len * sizeof(int64_t), node.in_stream); cudaStreamSynchronize(node.in_stream); platform::CUDADeviceGuard guard(resource_->dev_id(i)); tables_[i]->get(reinterpret_cast<uint64_t*>(node.key_storage), reinterpret_cast<int64_t*>(node.val_storage), h_right[i] - h_left[i] + 1, resource_->remote_stream(i, gpu_id)); // node.in_stream); auto graph = gpu_graph_list[i]; int64_t* id_array = reinterpret_cast<int64_t*>(node.val_storage); int* actual_size_array = (int*)(id_array + shard_len); int64_t* sample_array = (int64_t*)(actual_size_array + shard_len + shard_len % 2); int sample_grid_size = (shard_len - 1) / dim_y + 1; dim3 block(parallel_sample_size, dim_y); dim3 grid(sample_grid_size); neighbor_sample_example<<<grid, block, 0, resource_->remote_stream(i, gpu_id)>>>( graph, id_array, actual_size_array, sample_array, sample_size, sample_status[i], shard_len, gpu_id); } for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } cudaStreamSynchronize(resource_->remote_stream(i, gpu_id)); } move_neighbor_sample_result_to_source_gpu(gpu_id, total_gpu, sample_size, h_left, h_right, d_shard_vals_ptr, d_shard_actual_sample_size_ptr); fill_dvalues<<<grid_size, block_size_, 0, stream>>>( d_shard_vals_ptr, val, d_shard_actual_sample_size_ptr, actual_sample_size, d_idx_ptr, sample_size, len); for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 
0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } destroy_storage(gpu_id, i); } cudaStreamSynchronize(stream); return result; } NeighborSampleResult GpuPsGraphTable::graph_neighbor_sample_v2( int gpu_id, int64_t* key, int sample_size, int len, bool cpu_query_switch) { NeighborSampleResult result; result.initialize(sample_size, len, resource_->dev_id(gpu_id)); if (len == 0) { return result; } platform::CUDAPlace place = platform::CUDAPlace(resource_->dev_id(gpu_id)); platform::CUDADeviceGuard guard(resource_->dev_id(gpu_id)); int* actual_sample_size = result.actual_sample_size; int64_t* val = result.val; int total_gpu = resource_->total_device(); auto stream = resource_->local_stream(gpu_id, 0); int grid_size = (len - 1) / block_size_ + 1; int h_left[total_gpu]; // NOLINT int h_right[total_gpu]; // NOLINT auto d_left = memory::Alloc(place, total_gpu * sizeof(int)); auto d_right = memory::Alloc(place, total_gpu * sizeof(int)); int* d_left_ptr = reinterpret_cast<int*>(d_left->ptr()); int* d_right_ptr = reinterpret_cast<int*>(d_right->ptr()); int default_value = 0; if (cpu_query_switch) { default_value = -1; } cudaMemsetAsync(d_left_ptr, -1, total_gpu * sizeof(int), stream); cudaMemsetAsync(d_right_ptr, -1, total_gpu * sizeof(int), stream); // auto d_idx = memory::Alloc(place, len * sizeof(int)); int* d_idx_ptr = reinterpret_cast<int*>(d_idx->ptr()); auto d_shard_keys = memory::Alloc(place, len * sizeof(int64_t)); int64_t* d_shard_keys_ptr = reinterpret_cast<int64_t*>(d_shard_keys->ptr()); auto d_shard_vals = memory::Alloc(place, sample_size * len * sizeof(int64_t)); int64_t* d_shard_vals_ptr = reinterpret_cast<int64_t*>(d_shard_vals->ptr()); auto d_shard_actual_sample_size = memory::Alloc(place, len * sizeof(int)); int* d_shard_actual_sample_size_ptr = reinterpret_cast<int*>(d_shard_actual_sample_size->ptr()); split_input_to_shard((uint64_t*)(key), d_idx_ptr, len, d_left_ptr, d_right_ptr, gpu_id); heter_comm_kernel_->fill_shard_key(d_shard_keys_ptr, key, d_idx_ptr, len, stream); cudaStreamSynchronize(stream); cudaMemcpy(h_left, d_left_ptr, total_gpu * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_right, d_right_ptr, total_gpu * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } create_storage(gpu_id, i, shard_len * sizeof(int64_t), shard_len * (1 + sample_size) * sizeof(int64_t) + sizeof(int) * (shard_len + shard_len % 2)); } walk_to_dest(gpu_id, total_gpu, h_left, h_right, (uint64_t*)(d_shard_keys_ptr), NULL); for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } int shard_len = h_left[i] == -1 ? 0 : h_right[i] - h_left[i] + 1; auto& node = path_[gpu_id][i].nodes_.back(); cudaMemsetAsync(node.val_storage, -1, shard_len * sizeof(int64_t), node.in_stream); cudaStreamSynchronize(node.in_stream); platform::CUDADeviceGuard guard(resource_->dev_id(i)); // If not found, val is -1. 
tables_[i]->get(reinterpret_cast<uint64_t*>(node.key_storage), reinterpret_cast<int64_t*>(node.val_storage), h_right[i] - h_left[i] + 1, resource_->remote_stream(i, gpu_id)); auto graph = gpu_graph_list[i]; int64_t* id_array = reinterpret_cast<int64_t*>(node.val_storage); int* actual_size_array = (int*)(id_array + shard_len); int64_t* sample_array = (int64_t*)(actual_size_array + shard_len + shard_len % 2); constexpr int WARP_SIZE = 32; constexpr int BLOCK_WARPS = 128 / WARP_SIZE; constexpr int TILE_SIZE = BLOCK_WARPS * 16; const dim3 block(WARP_SIZE, BLOCK_WARPS); const dim3 grid((shard_len + TILE_SIZE - 1) / TILE_SIZE); neighbor_sample_example_v2<WARP_SIZE, BLOCK_WARPS, TILE_SIZE> <<<grid, block, 0, resource_->remote_stream(i, gpu_id)>>>( graph, id_array, actual_size_array, sample_array, sample_size, shard_len, default_value); } for (int i = 0; i < total_gpu; ++i) { if (h_left[i] == -1) { continue; } cudaStreamSynchronize(resource_->remote_stream(i, gpu_id)); } move_neighbor_sample_result_to_source_gpu(gpu_id, total_gpu, sample_size, h_left, h_right, d_shard_vals_ptr, d_shard_actual_sample_size_ptr); fill_dvalues<<<grid_size, block_size_, 0, stream>>>( d_shard_vals_ptr, val, d_shard_actual_sample_size_ptr, actual_sample_size, d_idx_ptr, sample_size, len); cudaStreamSynchronize(stream); if (cpu_query_switch) { // Get cpu keys and corresponding position. thrust::device_vector<int64_t> t_cpu_keys(len); thrust::device_vector<int> t_index(len + 1, 0); get_cpu_id_index<<<grid_size, block_size_, 0, stream>>>( key, actual_sample_size, thrust::raw_pointer_cast(t_cpu_keys.data()), thrust::raw_pointer_cast(t_index.data()), thrust::raw_pointer_cast(t_index.data()) + 1, len); cudaStreamSynchronize(stream); int number_on_cpu = 0; cudaMemcpy(&number_on_cpu, thrust::raw_pointer_cast(t_index.data()), sizeof(int), cudaMemcpyDeviceToHost); if (number_on_cpu > 0) { int64_t* cpu_keys = new int64_t[number_on_cpu]; cudaMemcpy(cpu_keys, thrust::raw_pointer_cast(t_cpu_keys.data()), number_on_cpu * sizeof(int64_t), cudaMemcpyDeviceToHost); std::vector<std::shared_ptr<char>> buffers(number_on_cpu); std::vector<int> ac(number_on_cpu); auto status = cpu_graph_table->random_sample_neighbors( 0, cpu_keys, sample_size, buffers, ac, false); int total_cpu_sample_size = std::accumulate(ac.begin(), ac.end(), 0); total_cpu_sample_size /= sizeof(int64_t); // Merge buffers into one int64_t vector. int64_t* merge_buffers = new int64_t[total_cpu_sample_size]; int start = 0; for (int j = 0; j < number_on_cpu; j++) { memcpy(merge_buffers + start, (int64_t*)(buffers[j].get()), ac[j]); start += ac[j] / sizeof(int64_t); } // Copy merge_buffers to gpu. thrust::device_vector<int64_t> gpu_buffers(total_cpu_sample_size); thrust::device_vector<int> gpu_ac(number_on_cpu); int64_t* gpu_buffers_ptr = thrust::raw_pointer_cast(gpu_buffers.data()); int* gpu_ac_ptr = thrust::raw_pointer_cast(gpu_ac.data()); cudaMemcpyAsync(gpu_buffers_ptr, merge_buffers, total_cpu_sample_size * sizeof(int64_t), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(gpu_ac_ptr, ac.data(), number_on_cpu * sizeof(int), cudaMemcpyHostToDevice, stream); // Copy gpu_buffers and gpu_ac using kernel. // Kernel divide for gpu_ac_ptr. 
int grid_size2 = (number_on_cpu - 1) / block_size_ + 1; get_actual_gpu_ac<<<grid_size2, block_size_, 0, stream>>>(gpu_ac_ptr, number_on_cpu); cudaStreamSynchronize(stream); thrust::device_vector<int> cumsum_gpu_ac(number_on_cpu); thrust::exclusive_scan(gpu_ac.begin(), gpu_ac.end(), cumsum_gpu_ac.begin(), 0); constexpr int WARP_SIZE_ = 32; constexpr int BLOCK_WARPS_ = 128 / WARP_SIZE_; constexpr int TILE_SIZE_ = BLOCK_WARPS_ * 16; const dim3 block2(WARP_SIZE_, BLOCK_WARPS_); const dim3 grid2((number_on_cpu + TILE_SIZE_ - 1) / TILE_SIZE_); copy_buffer_ac_to_final_place<WARP_SIZE_, BLOCK_WARPS_, TILE_SIZE_> <<<grid2, block2, 0, stream>>>( gpu_buffers_ptr, gpu_ac_ptr, val, actual_sample_size, thrust::raw_pointer_cast(t_index.data()) + 1, thrust::raw_pointer_cast(cumsum_gpu_ac.data()), number_on_cpu, sample_size); delete[] merge_buffers; delete[] cpu_keys; } } { cudaStreamSynchronize(stream); platform::CUDAPlace place = platform::CUDAPlace(resource_->dev_id(gpu_id)); platform::CUDADeviceGuard guard(resource_->dev_id(gpu_id)); thrust::device_vector<int> t_actual_sample_size(len); thrust::copy(actual_sample_size, actual_sample_size + len, t_actual_sample_size.begin()); int total_sample_size = thrust::reduce(t_actual_sample_size.begin(), t_actual_sample_size.end()); result.actual_val_mem = memory::AllocShared(place, total_sample_size * sizeof(int64_t)); result.actual_val = (int64_t*)(result.actual_val_mem)->ptr(); result.set_total_sample_size(total_sample_size); thrust::device_vector<int> cumsum_actual_sample_size(len); thrust::exclusive_scan(t_actual_sample_size.begin(), t_actual_sample_size.end(), cumsum_actual_sample_size.begin(), 0); fill_actual_vals<<<grid_size, block_size_, 0, stream>>>( val, result.actual_val, actual_sample_size, thrust::raw_pointer_cast(cumsum_actual_sample_size.data()), sample_size, len); } for (int i = 0; i < total_gpu; ++i) { int shard_len = h_left[i] == -1 ? 
0 : h_right[i] - h_left[i] + 1; if (shard_len == 0) { continue; } destroy_storage(gpu_id, i); } cudaStreamSynchronize(stream); return result; } NodeQueryResult GpuPsGraphTable::graph_node_sample(int gpu_id, int sample_size) { return NodeQueryResult(); } NodeQueryResult GpuPsGraphTable::query_node_list(int gpu_id, int start, int query_size) { NodeQueryResult result; if (query_size <= 0) return result; int& actual_size = result.actual_sample_size; actual_size = 0; // int dev_id = resource_->dev_id(gpu_id); // platform::CUDADeviceGuard guard(dev_id); std::vector<int> idx, gpu_begin_pos, local_begin_pos; int sample_size; /* if idx[i] = a, gpu_begin_pos[i] = p1, gpu_local_begin_pos[i] = p2; sample_size[i] = s; then on gpu a, the nodes of positions [p1,p1 + s) should be returned and saved from the p2 position on the sample_result array for example: suppose gpu 0 saves [0,2,4,6,8], gpu1 saves [1,3,5,7] start = 3, query_size = 5 we know [6,8,1,3,5] should be returned; idx = [0,1] gpu_begin_pos = [3,0] local_begin_pos = [0,3] sample_size = [2,3] */ std::function<int(int, int, int, int, int&, int&)> range_check = [](int x, int y, int x1, int y1, int& x2, int& y2) { if (y <= x1 || x >= y1) return 0; y2 = min(y, y1); x2 = max(x1, x); return y2 - x2; }; auto graph = gpu_graph_list[gpu_id]; if (graph.node_size == 0) { return result; } int x2, y2; int len = range_check(start, start + query_size, 0, graph.node_size, x2, y2); if (len == 0) { return result; } int64_t* val; sample_size = len; result.initialize(len, resource_->dev_id(gpu_id)); actual_size = len; val = result.val; int dev_id_i = resource_->dev_id(gpu_id); platform::CUDADeviceGuard guard(dev_id_i); // platform::CUDADeviceGuard guard(i); int grid_size = (len - 1) / block_size_ + 1; node_query_example<<<grid_size, block_size_, 0, resource_->remote_stream(gpu_id, gpu_id)>>>( gpu_graph_list[gpu_id], x2, len, (int64_t*)val); cudaStreamSynchronize(resource_->remote_stream(gpu_id, gpu_id)); return result; /* for (int i = 0; i < gpu_graph_list.size() && query_size != 0; i++) { auto graph = gpu_graph_list[i]; if (graph.node_size == 0) { continue; } int x2, y2; int len = range_check(start, start + query_size, size, size + graph.node_size, x2, y2); if (len > 0) { idx.push_back(i); gpu_begin_pos.emplace_back(x2 - size); local_begin_pos.emplace_back(actual_size); sample_size.push_back(len); actual_size += len; create_storage(gpu_id, i, 1, len * sizeof(int64_t)); } size += graph.node_size; } for (int i = 0; i < idx.size(); i++) { int dev_id_i = resource_->dev_id(idx[i]); platform::CUDADeviceGuard guard(dev_id_i); // platform::CUDADeviceGuard guard(i); auto& node = path_[gpu_id][idx[i]].nodes_.front(); int grid_size = (sample_size[i] - 1) / block_size_ + 1; node_query_example<<<grid_size, block_size_, 0, resource_->remote_stream(idx[i], gpu_id)>>>( gpu_graph_list[idx[i]], gpu_begin_pos[i], sample_size[i], (int64_t*)node.val_storage); } for (int i = 0; i < idx.size(); i++) { cudaStreamSynchronize(resource_->remote_stream(idx[i], gpu_id)); auto& node = path_[gpu_id][idx[i]].nodes_.front(); cudaMemcpyAsync(reinterpret_cast<char*>(val + local_begin_pos[i]), node.val_storage, node.val_bytes_len, cudaMemcpyDefault, node.out_stream); } for (int i = 0; i < idx.size(); i++) { auto& node = path_[gpu_id][idx[i]].nodes_.front(); cudaStreamSynchronize(node.out_stream); } for (auto x : idx) { destroy_storage(gpu_id, x); } return result; */ } } // namespace framework }; // namespace paddle #endif
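neighbor_sample_example_v2 above selects sample_len neighbors from a longer adjacency list with a reservoir-style loop (curand(&rng) % (j + 1), then atomicMax to resolve concurrent writes to the same slot). For intuition only, a scalar sketch of the underlying reservoir-sampling step, with hypothetical names:

// Scalar sketch of reservoir sampling (Algorithm R): keep the first k items,
// then item j (j >= k) replaces a uniformly chosen slot with probability k/(j+1).
// The warp-level kernel above performs the same selection in parallel and uses
// atomicMax so concurrent replacements of one slot still leave a valid index.
#include <cstdint>
#include <cstdlib>

void reservoir_sample(const int64_t* neighbors, int n, int64_t* out, int k) {
  for (int j = 0; j < k && j < n; ++j) out[j] = neighbors[j];
  for (int j = k; j < n; ++j) {
    int r = std::rand() % (j + 1);               // uniform in [0, j]
    if (r < k) out[r] = neighbors[j];            // item j evicts slot r
  }
}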
04b335d3afb5cfacb055bf425aad648701007d86.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void VecAdd(float* A, float* B, float* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
};

extern "C" void invoke_VecAdd(float* d_A, float* d_B, float* d_C, int N)
{
    int threadsPerBlock = 256;
    // Round up so a partial final block still covers the tail when N is not a
    // multiple of threadsPerBlock; the kernel's i < N guard handles the rest.
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
};
04b335d3afb5cfacb055bf425aad648701007d86.cu
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
};

extern "C" void invoke_VecAdd(float* d_A, float* d_B, float* d_C, int N)
{
    int threadsPerBlock = 256;
    // Round up so a partial final block still covers the tail when N is not a
    // multiple of threadsPerBlock; the kernel's i < N guard handles the rest.
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
};
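A hypothetical host driver for the wrapper above (array sizes and the verification step are made up for illustration; the HIP build would use hipMalloc/hipMemcpy/hipFree in the same places):

#include <cuda_runtime.h>
#include <vector>

extern "C" void invoke_VecAdd(float* d_A, float* d_B, float* d_C, int N);

int main() {
  const int N = 1 << 20;                         // multiple of the 256-thread block
  std::vector<float> a(N, 1.0f), b(N, 2.0f), c(N, 0.0f);
  float *dA, *dB, *dC;
  cudaMalloc(&dA, N * sizeof(float));
  cudaMalloc(&dB, N * sizeof(float));
  cudaMalloc(&dC, N * sizeof(float));
  cudaMemcpy(dA, a.data(), N * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, b.data(), N * sizeof(float), cudaMemcpyHostToDevice);
  invoke_VecAdd(dA, dB, dC, N);                  // enqueue the kernel
  cudaMemcpy(c.data(), dC, N * sizeof(float), cudaMemcpyDeviceToHost);  // implicit sync
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return (c[0] == 3.0f && c[N - 1] == 3.0f) ? 0 : 1;
}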
106b6f65a16d73c55993b8ec0c80b51becd4f144.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "difference.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *actual = NULL; hipMalloc(&actual, XSIZE*YSIZE); double *target = NULL; hipMalloc(&target, XSIZE*YSIZE); double *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( difference), dim3(gridBlock),dim3(threadBlock), 0, 0, n,actual,target,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( difference), dim3(gridBlock),dim3(threadBlock), 0, 0, n,actual,target,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( difference), dim3(gridBlock),dim3(threadBlock), 0, 0, n,actual,target,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
106b6f65a16d73c55993b8ec0c80b51becd4f144.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "difference.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *actual = NULL; cudaMalloc(&actual, XSIZE*YSIZE); double *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); double *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); difference<<<gridBlock,threadBlock>>>(n,actual,target,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { difference<<<gridBlock,threadBlock>>>(n,actual,target,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { difference<<<gridBlock,threadBlock>>>(n,actual,target,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
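The generated benchmark above times the launch loop with std::chrono and does not synchronize the device again before reading steady_clock::now(), so the host clock may stop before the enqueued kernels finish. A generic sketch of event-based timing as an alternative (CUDA spelling; hipEventCreate/hipEventRecord/hipEventElapsedTime mirror it), where `launch` is a placeholder callback for enqueuing the kernel under test:

#include <cuda_runtime.h>

float time_kernel_ms(void (*launch)(), int iters) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; ++i) launch();      // enqueue iters launches
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                    // wait until the work completes
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);        // elapsed time between the events
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms / iters;                             // average per launch
}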
3ecd75e9713d37a4e7871c1563e6e4f69ed26d2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdio> #include <libsgm.h> #include "winner_takes_all.hpp" #include "utility.hpp" namespace sgm { namespace { static constexpr unsigned int NUM_PATHS = 8u; static constexpr unsigned int WARPS_PER_BLOCK = 8u; static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * WARP_SIZE; __device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index){ union { uint32_t uint32; ushort2 uint16x2; } u; u.uint16x2.x = static_cast<uint16_t>(index); u.uint16x2.y = static_cast<uint16_t>(cost); return u.uint32; } __device__ uint32_t unpack_cost(uint32_t packed){ return packed >> 16; } __device__ int unpack_index(uint32_t packed){ return packed & 0xffffu; } using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*); __device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr) { return disp; } template <size_t MAX_DISPARITY> __device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem) { int subp = disp; subp <<= sgm::StereoSGM::SUBPIXEL_SHIFT; if (disp > 0 && disp < MAX_DISPARITY - 1) { const int left = smem[disp - 1]; const int right = smem[disp + 1]; const int numer = left - right; const int denom = left - 2 * cost + right; subp += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom); } return subp; } template <unsigned int MAX_DISPARITY, ComputeDisparity compute_disparity = compute_disparity_normal> __global__ void winner_takes_all_kernel( output_type *left_dest, output_type *right_dest, const cost_type *src, int width, int height, int pitch, float uniqueness) { static const unsigned int ACCUMULATION_PER_THREAD = 16u; static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / WARP_SIZE; static const unsigned int ACCUMULATION_INTERVAL = ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD; static const unsigned int UNROLL_DEPTH = (REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL) ? 
REDUCTION_PER_THREAD : ACCUMULATION_INTERVAL; const unsigned int cost_step = MAX_DISPARITY * width * height; const unsigned int warp_id = threadIdx.x / WARP_SIZE; const unsigned int lane_id = threadIdx.x % WARP_SIZE; const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id; src += y * MAX_DISPARITY * width; left_dest += y * pitch; right_dest += y * pitch; if(y >= height){ return; } __shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY]; uint32_t right_best[REDUCTION_PER_THREAD]; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ right_best[i] = 0xffffffffu; } for(unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH){ #pragma unroll for(unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1){ if(x1 % ACCUMULATION_INTERVAL == 0){ const unsigned int k = lane_id * ACCUMULATION_PER_THREAD; const unsigned int k_hi = k / MAX_DISPARITY; const unsigned int k_lo = k % MAX_DISPARITY; const unsigned int x = x0 + x1 + k_hi; if(x < width){ const unsigned int offset = x * MAX_DISPARITY + k_lo; uint32_t sum[ACCUMULATION_PER_THREAD]; for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){ sum[i] = 0; } for(unsigned int p = 0; p < NUM_PATHS; ++p){ uint32_t load_buffer[ACCUMULATION_PER_THREAD]; load_uint8_vector<ACCUMULATION_PER_THREAD>( load_buffer, &src[p * cost_step + offset]); for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){ sum[i] += load_buffer[i]; } } store_uint16_vector<ACCUMULATION_PER_THREAD>( &smem_cost_sum[warp_id][k_hi][k_lo], sum); } #if TORCH_HIP_VERSION >= 9000 __syncwarp(); #else __threadfence_block(); #endif } const unsigned int x = x0 + x1; if(x < width){ // Load sum of costs const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL; const unsigned int k0 = lane_id * REDUCTION_PER_THREAD; uint32_t local_cost_sum[REDUCTION_PER_THREAD]; load_uint16_vector<REDUCTION_PER_THREAD>( local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]); // Pack sum of costs and dispairty uint32_t local_packed_cost[REDUCTION_PER_THREAD]; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i); } // Update left uint32_t best = 0xffffffffu; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ best = min(best, local_packed_cost[i]); } best = subgroup_min<WARP_SIZE>(best, 0xffffffffu); // Update right #pragma unroll for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ const unsigned int k = lane_id * REDUCTION_PER_THREAD + i; const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k); const unsigned int d = static_cast<unsigned int>(x - p); #if TORCH_HIP_VERSION >= 9000 const uint32_t recv = __shfl_sync(0xffffffffu, local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD], d / REDUCTION_PER_THREAD, WARP_SIZE); #else const uint32_t recv = __shfl( local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD], d / REDUCTION_PER_THREAD, WARP_SIZE); #endif right_best[i] = min(right_best[i], recv); if(d == MAX_DISPARITY - 1){ if(0 <= p){ right_dest[p] = compute_disparity_normal(unpack_index(right_best[i])); } right_best[i] = 0xffffffffu; } } // Resume updating left to avoid execution dependency const uint32_t bestCost = unpack_cost(best); const int bestDisp = unpack_index(best); bool uniq = true; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ const uint32_t x = local_packed_cost[i]; const bool uniq1 = unpack_cost(x) * uniqueness >= bestCost; const bool uniq2 = abs(unpack_index(x) - bestDisp) <= 1; uniq &= uniq1 || uniq2; } uniq = subgroup_and<WARP_SIZE>(uniq, 
0xffffffffu); if(lane_id == 0){ left_dest[x] = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : 0; } } } } for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ const unsigned int k = lane_id * REDUCTION_PER_THREAD + i; const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k); if(p < width){ right_dest[p] = compute_disparity_normal(unpack_index(right_best[i])); } } } template <size_t MAX_DISPARITY> void enqueue_winner_takes_all( output_type *left_dest, output_type *right_dest, const cost_type *src, int width, int height, int pitch, float uniqueness, bool subpixel, hipStream_t stream) { const int gdim = (height + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK; const int bdim = BLOCK_SIZE; if (subpixel) { hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, compute_disparity_subpixel<MAX_DISPARITY>>), dim3(gdim), dim3(bdim), 0, stream, left_dest, right_dest, src, width, height, pitch, uniqueness); } else { hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, compute_disparity_normal>), dim3(gdim), dim3(bdim), 0, stream, left_dest, right_dest, src, width, height, pitch, uniqueness); } } } template <size_t MAX_DISPARITY> WinnerTakesAll<MAX_DISPARITY>::WinnerTakesAll() : m_left_buffer() , m_right_buffer() { } template <size_t MAX_DISPARITY> void WinnerTakesAll<MAX_DISPARITY>::enqueue( const cost_type *src, int width, int height, int pitch, float uniqueness, bool subpixel, hipStream_t stream) { if(m_left_buffer.size() != static_cast<size_t>(pitch * height)){ m_left_buffer = DeviceBuffer<output_type>(pitch * height); } if(m_right_buffer.size() != static_cast<size_t>(pitch * height)){ m_right_buffer = DeviceBuffer<output_type>(pitch * height); } enqueue_winner_takes_all<MAX_DISPARITY>( m_left_buffer.data(), m_right_buffer.data(), src, width, height, pitch, uniqueness, subpixel, stream); } template <size_t MAX_DISPARITY> void WinnerTakesAll<MAX_DISPARITY>::enqueue( output_type* left, output_type* right, const cost_type *src, int width, int height, int pitch, float uniqueness, bool subpixel, hipStream_t stream) { enqueue_winner_takes_all<MAX_DISPARITY>( left, right, src, width, height, pitch, uniqueness, subpixel, stream); } template class WinnerTakesAll< 64>; template class WinnerTakesAll<128>; }
3ecd75e9713d37a4e7871c1563e6e4f69ed26d2b.cu
/* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdio> #include <libsgm.h> #include "winner_takes_all.hpp" #include "utility.hpp" namespace sgm { namespace { static constexpr unsigned int NUM_PATHS = 8u; static constexpr unsigned int WARPS_PER_BLOCK = 8u; static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * WARP_SIZE; __device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index){ union { uint32_t uint32; ushort2 uint16x2; } u; u.uint16x2.x = static_cast<uint16_t>(index); u.uint16x2.y = static_cast<uint16_t>(cost); return u.uint32; } __device__ uint32_t unpack_cost(uint32_t packed){ return packed >> 16; } __device__ int unpack_index(uint32_t packed){ return packed & 0xffffu; } using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*); __device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr) { return disp; } template <size_t MAX_DISPARITY> __device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem) { int subp = disp; subp <<= sgm::StereoSGM::SUBPIXEL_SHIFT; if (disp > 0 && disp < MAX_DISPARITY - 1) { const int left = smem[disp - 1]; const int right = smem[disp + 1]; const int numer = left - right; const int denom = left - 2 * cost + right; subp += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom); } return subp; } template <unsigned int MAX_DISPARITY, ComputeDisparity compute_disparity = compute_disparity_normal> __global__ void winner_takes_all_kernel( output_type *left_dest, output_type *right_dest, const cost_type *src, int width, int height, int pitch, float uniqueness) { static const unsigned int ACCUMULATION_PER_THREAD = 16u; static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / WARP_SIZE; static const unsigned int ACCUMULATION_INTERVAL = ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD; static const unsigned int UNROLL_DEPTH = (REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL) ? 
REDUCTION_PER_THREAD : ACCUMULATION_INTERVAL; const unsigned int cost_step = MAX_DISPARITY * width * height; const unsigned int warp_id = threadIdx.x / WARP_SIZE; const unsigned int lane_id = threadIdx.x % WARP_SIZE; const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id; src += y * MAX_DISPARITY * width; left_dest += y * pitch; right_dest += y * pitch; if(y >= height){ return; } __shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY]; uint32_t right_best[REDUCTION_PER_THREAD]; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ right_best[i] = 0xffffffffu; } for(unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH){ #pragma unroll for(unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1){ if(x1 % ACCUMULATION_INTERVAL == 0){ const unsigned int k = lane_id * ACCUMULATION_PER_THREAD; const unsigned int k_hi = k / MAX_DISPARITY; const unsigned int k_lo = k % MAX_DISPARITY; const unsigned int x = x0 + x1 + k_hi; if(x < width){ const unsigned int offset = x * MAX_DISPARITY + k_lo; uint32_t sum[ACCUMULATION_PER_THREAD]; for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){ sum[i] = 0; } for(unsigned int p = 0; p < NUM_PATHS; ++p){ uint32_t load_buffer[ACCUMULATION_PER_THREAD]; load_uint8_vector<ACCUMULATION_PER_THREAD>( load_buffer, &src[p * cost_step + offset]); for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){ sum[i] += load_buffer[i]; } } store_uint16_vector<ACCUMULATION_PER_THREAD>( &smem_cost_sum[warp_id][k_hi][k_lo], sum); } #if CUDA_VERSION >= 9000 __syncwarp(); #else __threadfence_block(); #endif } const unsigned int x = x0 + x1; if(x < width){ // Load sum of costs const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL; const unsigned int k0 = lane_id * REDUCTION_PER_THREAD; uint32_t local_cost_sum[REDUCTION_PER_THREAD]; load_uint16_vector<REDUCTION_PER_THREAD>( local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]); // Pack sum of costs and dispairty uint32_t local_packed_cost[REDUCTION_PER_THREAD]; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i); } // Update left uint32_t best = 0xffffffffu; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ best = min(best, local_packed_cost[i]); } best = subgroup_min<WARP_SIZE>(best, 0xffffffffu); // Update right #pragma unroll for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ const unsigned int k = lane_id * REDUCTION_PER_THREAD + i; const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k); const unsigned int d = static_cast<unsigned int>(x - p); #if CUDA_VERSION >= 9000 const uint32_t recv = __shfl_sync(0xffffffffu, local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD], d / REDUCTION_PER_THREAD, WARP_SIZE); #else const uint32_t recv = __shfl( local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD], d / REDUCTION_PER_THREAD, WARP_SIZE); #endif right_best[i] = min(right_best[i], recv); if(d == MAX_DISPARITY - 1){ if(0 <= p){ right_dest[p] = compute_disparity_normal(unpack_index(right_best[i])); } right_best[i] = 0xffffffffu; } } // Resume updating left to avoid execution dependency const uint32_t bestCost = unpack_cost(best); const int bestDisp = unpack_index(best); bool uniq = true; for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ const uint32_t x = local_packed_cost[i]; const bool uniq1 = unpack_cost(x) * uniqueness >= bestCost; const bool uniq2 = abs(unpack_index(x) - bestDisp) <= 1; uniq &= uniq1 || uniq2; } uniq = subgroup_and<WARP_SIZE>(uniq, 0xffffffffu); 
if(lane_id == 0){ left_dest[x] = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : 0; } } } } for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){ const unsigned int k = lane_id * REDUCTION_PER_THREAD + i; const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k); if(p < width){ right_dest[p] = compute_disparity_normal(unpack_index(right_best[i])); } } } template <size_t MAX_DISPARITY> void enqueue_winner_takes_all( output_type *left_dest, output_type *right_dest, const cost_type *src, int width, int height, int pitch, float uniqueness, bool subpixel, cudaStream_t stream) { const int gdim = (height + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK; const int bdim = BLOCK_SIZE; if (subpixel) { winner_takes_all_kernel<MAX_DISPARITY, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim, 0, stream>>>( left_dest, right_dest, src, width, height, pitch, uniqueness); } else { winner_takes_all_kernel<MAX_DISPARITY, compute_disparity_normal><<<gdim, bdim, 0, stream>>>( left_dest, right_dest, src, width, height, pitch, uniqueness); } } } template <size_t MAX_DISPARITY> WinnerTakesAll<MAX_DISPARITY>::WinnerTakesAll() : m_left_buffer() , m_right_buffer() { } template <size_t MAX_DISPARITY> void WinnerTakesAll<MAX_DISPARITY>::enqueue( const cost_type *src, int width, int height, int pitch, float uniqueness, bool subpixel, cudaStream_t stream) { if(m_left_buffer.size() != static_cast<size_t>(pitch * height)){ m_left_buffer = DeviceBuffer<output_type>(pitch * height); } if(m_right_buffer.size() != static_cast<size_t>(pitch * height)){ m_right_buffer = DeviceBuffer<output_type>(pitch * height); } enqueue_winner_takes_all<MAX_DISPARITY>( m_left_buffer.data(), m_right_buffer.data(), src, width, height, pitch, uniqueness, subpixel, stream); } template <size_t MAX_DISPARITY> void WinnerTakesAll<MAX_DISPARITY>::enqueue( output_type* left, output_type* right, const cost_type *src, int width, int height, int pitch, float uniqueness, bool subpixel, cudaStream_t stream) { enqueue_winner_takes_all<MAX_DISPARITY>( left, right, src, width, height, pitch, uniqueness, subpixel, stream); } template class WinnerTakesAll< 64>; template class WinnerTakesAll<128>; }
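// A minimal, host-only sketch of the cost/index packing idea the WTA kernel
// above relies on: storing the 16-bit cost in the high half and the disparity
// index in the low half of a uint32 lets a single unsigned min() pick the
// smallest cost and, on ties, the smaller disparity. This mirrors
// pack_cost_index/unpack_cost/unpack_index (which assume a little-endian
// ushort2 layout); the standalone helper and the sample values below are
// illustrative only.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static inline uint32_t pack_cost_index_ref(uint32_t cost, uint32_t index) {
    return (cost << 16) | (index & 0xffffu);   // cost in high bits, index in low bits
}

int main() {
    const uint32_t costs[4] = { 40u, 17u, 17u, 90u };
    uint32_t best = 0xffffffffu;
    for (uint32_t d = 0; d < 4; ++d) {
        best = std::min(best, pack_cost_index_ref(costs[d], d));
    }
    // prints "cost=17 disp=1": the tie between d=1 and d=2 resolves to the smaller index
    std::printf("cost=%u disp=%u\n", best >> 16, best & 0xffffu);
    return 0;
}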
d1537b77cb23b0c3526489bd2d27174f8236d0b3.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include <algorithm> #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 8, N = 8, K = 4 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // Define the epilogue operation as LinearCombinationRelu. This is approximately equal to // // d_ij = max(0, alpha * sum_k(a_ik * b_kj) + beta * c_ij ) // using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias( {problem_size.m(), 1}); // <- Create matrix C with dimensions M x 1 cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device {tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM // to project away the N dimension by setting the stride to zero. 
tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device_reference; // Launch device reference to compute strictly the product A * B gemm_device_reference( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), 0, tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for (int i = 0; i < problem_size.m(); ++i) { for (int j = 0; j < problem_size.n(); ++j) { tensor_ref_d.at({i, j}) = ::max( ElementOutput(0), ElementOutput(tensor_ref_d.at({i, j}) + beta * tensor_c_bias.at({i, 0})) ); } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main() { // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } else { return run(); } }
d1537b77cb23b0c3526489bd2d27174f8236d0b3.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include <algorithm> #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm75; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 8, N = 8, K = 4 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // Define the epilogue operation as LinearCombinationRelu. This is approximately equal to // // d_ij = max(0, alpha * sum_k(a_ik * b_kj) + beta * c_ij ) // using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias( {problem_size.m(), 1}); // <- Create matrix C with dimensions M x 1 cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device {tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM // to project away the N dimension by setting the stride to zero. 
tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device_reference; // Launch device reference to compute strictly the product A * B gemm_device_reference( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), 0, tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for (int i = 0; i < problem_size.m(); ++i) { for (int j = 0; j < problem_size.n(); ++j) { tensor_ref_d.at({i, j}) = std::max( ElementOutput(0), ElementOutput(tensor_ref_d.at({i, j}) + beta * tensor_c_bias.at({i, 0})) ); } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main() { // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } else { return run(); } }
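// A minimal host-side sketch of the bias + ReLU epilogue that the example
// above configures and that its reference check recomputes on the CPU:
// d(i, j) = max(0, alpha * acc(i, j) + beta * bias(i)), with the bias column
// broadcast across N by giving the C operand a stride of 0. Plain C++,
// independent of CUTLASS; the function and buffer names are illustrative only.
#include <algorithm>
#include <vector>

void bias_relu_epilogue(const std::vector<float>& acc,   // M x N accumulators, row-major
                        const std::vector<float>& bias,  // M entries, one per row
                        std::vector<float>& d,           // M x N output, row-major
                        int M, int N, float alpha, float beta) {
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            // stride-0 C operand: every column j of row i reads the same bias[i]
            d[i * N + j] = std::max(0.0f, alpha * acc[i * N + j] + beta * bias[i]);
        }
    }
}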
f859fe1117109b4b5821c9f355a988747c5408db.hip
// !!! This is a file automatically generated by hipify!!! #include "THHApply.cuh" #include "utils.h" struct LeakyReLUUpdateOutput { const float slope_; LeakyReLUUpdateOutput(float slope): slope_(slope) {} __device__ __forceinline__ void operator()(float* out, float* in) { float x = *in; *out = (x > 0) ? x : x*slope_; } }; // in-place variant struct LeakyReLUUpdateOutputIP { const float slope_; LeakyReLUUpdateOutputIP(float slope): slope_(slope) {} __device__ __forceinline__ void operator()(float* x) { *x = (*x > 0) ? *x : *x*slope_; } }; static int cunn_LeakyReLU_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); double slope = luaT_getfieldchecknumber(L, 1, "slope"); bool inPlace = luaT_getfieldcheckboolean(L, 1, "inplace"); THAssert(THCudaTensor_checkGPU(state, 2, input, output)); if (inPlace) { THCudaTensor_pointwiseApply1(state, input, LeakyReLUUpdateOutputIP(slope)); THCudaTensor_set(state, output, input); } else { THCudaTensor_resizeAs(state, output, input); THCudaTensor_pointwiseApply2(state, output, input, LeakyReLUUpdateOutput(slope)); } THCudaCheck(hipGetLastError()); return 1; } struct LeakyReLUUpdateGradInput { const float slope_; LeakyReLUUpdateGradInput(float slope) : slope_(slope) {} __device__ __forceinline__ void operator()(float* gradInput, float* input, float* gradOutput) const { *gradInput = (*input > 0) ? *gradOutput : *gradOutput*slope_; } }; struct LeakyReLUUpdateGradInputIP { const float slope_; LeakyReLUUpdateGradInputIP(float slope) : slope_(slope) {} __device__ __forceinline__ void operator()(float* gradOutput, float* input) const { *gradOutput = (*input > 0) ? *gradOutput : *gradOutput*slope_; } }; static int cunn_LeakyReLU_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); double slope = luaT_getfieldchecknumber(L, 1, "slope"); bool inPlace = luaT_getfieldcheckboolean(L, 1, "inplace"); THAssert(THCudaTensor_checkGPU(state, 4, input, output, gradInput, gradOutput)); if (inPlace) { THCudaTensor_pointwiseApply2(state, gradOutput, input, LeakyReLUUpdateGradInputIP(slope)); THCudaTensor_set(state, gradInput, gradOutput); } else { THCudaTensor_resizeAs(state, gradInput, output); THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput(slope)); } THCudaCheck(hipGetLastError()); return 1; } static const struct luaL_Reg cunn_LeakyReLU__ [] = { {"LeakyReLU_updateOutput", cunn_LeakyReLU_updateOutput}, {"LeakyReLU_updateGradInput", cunn_LeakyReLU_updateGradInput}, {NULL, NULL} }; void cunn_LeakyReLU_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_LeakyReLU__, "nn"); lua_pop(L,1); }
f859fe1117109b4b5821c9f355a988747c5408db.cu
#include "THCApply.cuh" #include "utils.h" struct LeakyReLUUpdateOutput { const float slope_; LeakyReLUUpdateOutput(float slope): slope_(slope) {} __device__ __forceinline__ void operator()(float* out, float* in) { float x = *in; *out = (x > 0) ? x : x*slope_; } }; // in-place variant struct LeakyReLUUpdateOutputIP { const float slope_; LeakyReLUUpdateOutputIP(float slope): slope_(slope) {} __device__ __forceinline__ void operator()(float* x) { *x = (*x > 0) ? *x : *x*slope_; } }; static int cunn_LeakyReLU_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); double slope = luaT_getfieldchecknumber(L, 1, "slope"); bool inPlace = luaT_getfieldcheckboolean(L, 1, "inplace"); THAssert(THCudaTensor_checkGPU(state, 2, input, output)); if (inPlace) { THCudaTensor_pointwiseApply1(state, input, LeakyReLUUpdateOutputIP(slope)); THCudaTensor_set(state, output, input); } else { THCudaTensor_resizeAs(state, output, input); THCudaTensor_pointwiseApply2(state, output, input, LeakyReLUUpdateOutput(slope)); } THCudaCheck(cudaGetLastError()); return 1; } struct LeakyReLUUpdateGradInput { const float slope_; LeakyReLUUpdateGradInput(float slope) : slope_(slope) {} __device__ __forceinline__ void operator()(float* gradInput, float* input, float* gradOutput) const { *gradInput = (*input > 0) ? *gradOutput : *gradOutput*slope_; } }; struct LeakyReLUUpdateGradInputIP { const float slope_; LeakyReLUUpdateGradInputIP(float slope) : slope_(slope) {} __device__ __forceinline__ void operator()(float* gradOutput, float* input) const { *gradOutput = (*input > 0) ? *gradOutput : *gradOutput*slope_; } }; static int cunn_LeakyReLU_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); double slope = luaT_getfieldchecknumber(L, 1, "slope"); bool inPlace = luaT_getfieldcheckboolean(L, 1, "inplace"); THAssert(THCudaTensor_checkGPU(state, 4, input, output, gradInput, gradOutput)); if (inPlace) { THCudaTensor_pointwiseApply2(state, gradOutput, input, LeakyReLUUpdateGradInputIP(slope)); THCudaTensor_set(state, gradInput, gradOutput); } else { THCudaTensor_resizeAs(state, gradInput, output); THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput(slope)); } THCudaCheck(cudaGetLastError()); return 1; } static const struct luaL_Reg cunn_LeakyReLU__ [] = { {"LeakyReLU_updateOutput", cunn_LeakyReLU_updateOutput}, {"LeakyReLU_updateGradInput", cunn_LeakyReLU_updateGradInput}, {NULL, NULL} }; void cunn_LeakyReLU_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_LeakyReLU__, "nn"); lua_pop(L,1); }
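// A minimal standalone CUDA sketch of the same pointwise rules the
// cunn_LeakyReLU bindings above apply through THC's pointwiseApply helpers.
// The kernels, launch shape, and buffer names here are illustrative and not
// part of cunn; they only restate the forward/backward element-wise math.
#include <cuda_runtime.h>

__global__ void leaky_relu_forward(const float* in, float* out, int n, float slope) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float x = in[i];
        out[i] = (x > 0.0f) ? x : x * slope;              // same rule as LeakyReLUUpdateOutput
    }
}

__global__ void leaky_relu_backward(const float* in, const float* grad_out,
                                    float* grad_in, int n, float slope) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // derivative is 1 where the input was positive, slope elsewhere
        grad_in[i] = (in[i] > 0.0f) ? grad_out[i] : grad_out[i] * slope;
    }
}

// Example launch: leaky_relu_forward<<<(n + 255) / 256, 256>>>(d_in, d_out, n, 0.01f);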
4af5ce5a5b5a8c86c6abd1378a7b149de1f2c8cd.hip
// !!! This is a file automatically generated by hipify!!! #include <iso646.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <iostream> #include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void SimpleClone( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { output[curb*3+0] = target[curt*3+0]; output[curb*3+1] = target[curt*3+1]; output[curb*3+2] = target[curt*3+2]; /* output[curb * 3 + 0] = 255; output[curb * 3 + 1] = 255; output[curb * 3 + 2] = 255;*/ } } } __global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt + xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy + yt, xb = ox + xt; const int curb = wb*yb + xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { if (yt == 0 || xt == 0 || yt == (ht - 1) || xt == (wt - 1)) { fixed[curt * 3 + 0] = 0; fixed[curt * 3 + 1] = 0; fixed[curt * 3 + 2] = 0; } else { fixed[curt * 3 + 0] = 4 * target[curt * 3 + 0] - (target[(curt - wt) * 3 + 0] + target[(curt - 1) * 3 + 0] + target[(curt + wt) * 3 + 0] + target[(curt + 1) * 3 + 0]); fixed[curt * 3 + 1] = 4 * target[curt * 3 + 1] - (target[(curt - wt) * 3 + 1] + target[(curt - 1) * 3 + 1] + target[(curt + wt) * 3 + 1] + target[(curt + 1) * 3 + 1]); fixed[curt * 3 + 2] = 4 * target[curt * 3 + 2] - (target[(curt - wt) * 3 + 2] + target[(curt - 1) * 3 + 2] + target[(curt + wt) * 3 + 2] + target[(curt + 1) * 3 + 2]); } if (yt == 0 || mask[curt - wt] != 255.0f) { fixed[curt * 3 + 0] += background[(curb - wb) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb - wb) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb - wb) * 3 + 2]; } if (xt == 0 || mask[curt - 1] != 255.0f) { fixed[curt * 3 + 0] += background[(curb - 1) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb - 1) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb - 1) * 3 + 2]; } if (yt == (ht - 1) || mask[curt + wt] != 255.0f) { fixed[curt * 3 + 0] += background[(curb + wb) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb + wb) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb + wb) * 3 + 2]; } if (xt == (wt - 1) || mask[curt + 1] != 255.0f) { fixed[curt * 3 + 0] += background[(curb + 1) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb + 1) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb + 1) * 3 + 2]; } } } } __global__ void PoissonImageCloningIteration( const float *fixed, const float *mask, const float *buf1, float *buf2, const int wt, const int ht ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt + xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { buf2[curt * 3 + 0] = fixed[curt * 3 + 0]; buf2[curt * 3 + 1] = fixed[curt * 3 + 1]; buf2[curt * 3 + 2] = fixed[curt 
* 3 + 2]; if (yt != 0 && mask[curt - wt] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt - wt) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt - wt) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt - wt) * 3 + 2]; } if (xt != 0 && mask[curt - 1] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt - 1) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt - 1) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt - 1) * 3 + 2]; } if (yt != (ht - 1) && mask[curt + wt] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt + wt) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt + wt) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt + wt) * 3 + 2]; } if (xt != (wt - 1) && mask[curt + 1] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt + 1) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt + 1) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt + 1) * 3 + 2]; } buf2[curt * 3 + 0] /= 4; buf2[curt * 3 + 1] /= 4; buf2[curt * 3 + 2] /= 4; } } __global__ void ImageShrinking( const float *src, float *dst, const int ws, const int hs ) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < ((hs + 1) / 2) and x < ((ws + 1) / 2)) { dst[(((ws + 1) /2)*y + x) * 3 + 0] = src[(ws*(2*y) + (2*x)) * 3 + 0]; dst[(((ws + 1) / 2)*y + x) * 3 + 1] = src[(ws*(2 * y) + (2 * x)) * 3 + 1]; dst[(((ws + 1) / 2)*y + x) * 3 + 2] = src[(ws*(2 * y) + (2 * x)) * 3 + 2]; } } __global__ void ImageShrinkingMask( const float *src, float *dst, const int ws, const int hs ) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < ((hs + 1) / 2) and x < ((ws + 1) / 2)) { dst[((ws + 1) / 2)*y + x] = src[ws*(2 * y) + (2 * x)]; } } __global__ void ImageUpsample( const float *src, float *dst, const int wd, const int hd ) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y * 2 < hd and x * 2 < wd) { dst[((wd *(y * 2)) + (x * 2)) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2)) + (x * 2)) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2)) + (x * 2)) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; if ((x * 2 + 1) < wd) { dst[((wd *(y * 2)) + (x * 2) + 1) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2)) + (x * 2) + 1) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2)) + (x * 2) + 1) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; } if ((y * 2 + 1) < hd) { dst[((wd *(y * 2 + 1)) + (x * 2)) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2 + 1)) + (x * 2)) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2 + 1)) + (x * 2)) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; } if ((x * 2 + 1) < wd and (y * 2 + 1) < hd) { dst[((wd *(y * 2 + 1)) + (x * 2) + 1) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2 + 1)) + (x * 2) + 1) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2 + 1)) + (x * 2) + 1) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; } } } void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { dim3 gdim(CeilDiv(wt, 32), CeilDiv(ht, 16)), bdim(32, 16); //set up /* float *fixed; float *buf1, *buf2; hipMalloc(&fixed, 3 * wt*ht*sizeof(float)); hipMalloc(&buf1, 3 * wt*ht*sizeof(float)); hipMalloc(&buf2, 3 * wt*ht*sizeof(float)); //initialize the iteration CalculateFixed <<< gdim, bdim >>>( background, target, mask, fixed, wb, hb, wt, ht, oy, ox ); 
hipMemcpy(buf1, target, sizeof(float) * 3 * wt*ht, hipMemcpyDeviceToDevice); //iterate //Original for (int i = 0; i < 10000; ++i) { PoissonImageCloningIteration <<<gdim, bdim >>>( fixed, mask, buf1, buf2, wt, ht ); PoissonImageCloningIteration <<<gdim, bdim >>>( fixed, mask, buf2, buf1, wt, ht ); }*/ //Hierachical //declare float *background_hier[4]; float *fixed_hier[4]; float *mask_hier[4]; float *buf1_hier[4]; float *buf2_hier[4]; int wbs[4]; int hbs[4]; int ws[4]; int hs[4]; for (int i = 0; i < 4; i++) { if (i == 0) { ws[i] = wt; hs[i] = ht; wbs[i] = wb; hbs[i] = hb; } else { ws[i] = (ws[i-1] + 1) / 2; hs[i] = (hs[i-1] + 1) / 2; wbs[i] = (wbs[i-1] + 1) / 2; hbs[i] = (hbs[i-1] + 1) / 2; } } //Malloc for (int i = 0; i < 4; i++) { hipMalloc(&background_hier[i], 3 * wbs[i] * hbs[i] * sizeof(float)); hipMalloc(&fixed_hier[i], 3 * ws[i] * hs[i] * sizeof(float)); hipMalloc(&mask_hier[i], ws[i] * hs[i] * sizeof(float)); hipMalloc(&buf1_hier[i], 3 * ws[i] * hs[i] * sizeof(float)); hipMalloc(&buf2_hier[i], 3 * ws[i] * hs[i] * sizeof(float)); } //initialize hipMemcpy(background_hier[0], background, sizeof(float) * 3 * wb * hb, hipMemcpyDeviceToDevice); hipMemcpy(mask_hier[0], mask, sizeof(float) * wt*ht, hipMemcpyDeviceToDevice); hipMemcpy(buf1_hier[0], target, sizeof(float) * 3 * wt*ht, hipMemcpyDeviceToDevice); for (int i = 1; i < 4; i++) { ImageShrinking << < dim3(CeilDiv(wbs[i], 32), CeilDiv(hbs[i], 16)), dim3(32, 16) >> > (background_hier[i-1], background_hier[i], wbs[i-1], hbs[i-1]); ImageShrinkingMask << < dim3(CeilDiv(ws[i], 32), CeilDiv(hs[i], 16)), dim3(32, 16) >> > (mask_hier[i-1], mask_hier[i], ws[i-1], hs[i-1]); ImageShrinking << < dim3(CeilDiv(ws[i], 32), CeilDiv(hs[i], 16)), dim3(32, 16) >> > (buf1_hier[i-1], buf1_hier[i], ws[i-1], hs[i-1]); } //fixed for (int i = 0; i < 4; i++) { CalculateFixed << < dim3(CeilDiv(ws[i], 32), CeilDiv(hs[i], 16)), dim3(32, 16) >> >( background_hier[i], buf1_hier[i], mask_hier[i], fixed_hier[i], wbs[i], hbs[i], ws[i], hs[i], (oy / pow(2, i)), (ox / pow(2, i)) ); } //iteration for (int i = 0; i < 4; ++i) { for (int j = 0; j < 500; ++j) { PoissonImageCloningIteration << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> >( fixed_hier[3 - i], mask_hier[3 - i], buf1_hier[3 - i], buf2_hier[3 - i], ws[3 - i], hs[3 - i] ); PoissonImageCloningIteration << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> >( fixed_hier[3 - i], mask_hier[3 - i], buf2_hier[3 - i], buf1_hier[3 - i], ws[3 - i], hs[3 - i] ); } if (i < 3) { ImageUpsample << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> > (buf1_hier[3 - i], buf1_hier[2 - i], ws[2 - i], hs[2 - i]); ImageUpsample << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> > (buf2_hier[3 - i], buf2_hier[2 - i], ws[2 - i], hs[2 - i]); } } //copy the image back hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice); /* SimpleClone <<< gdim, bdim >>>( background, buf1, mask, output, wb, hb, wt, ht, oy, ox );*/ SimpleClone << < gdim, bdim >> >( background, buf1_hier[0], mask, output, wb, hb, wt, ht, oy, ox ); //clean up /* hipFree(fixed); hipFree(buf1); hipFree(buf2);*/ for (int i = 0; i < 4; i++) { hipFree(fixed_hier[i]); hipFree(mask_hier[i]); hipFree(buf1_hier[i]); hipFree(buf2_hier[i]); } }
4af5ce5a5b5a8c86c6abd1378a7b149de1f2c8cd.cu
#include <iso646.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <iostream> #include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void SimpleClone( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { output[curb*3+0] = target[curt*3+0]; output[curb*3+1] = target[curt*3+1]; output[curb*3+2] = target[curt*3+2]; /* output[curb * 3 + 0] = 255; output[curb * 3 + 1] = 255; output[curb * 3 + 2] = 255;*/ } } } __global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt + xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy + yt, xb = ox + xt; const int curb = wb*yb + xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { if (yt == 0 || xt == 0 || yt == (ht - 1) || xt == (wt - 1)) { fixed[curt * 3 + 0] = 0; fixed[curt * 3 + 1] = 0; fixed[curt * 3 + 2] = 0; } else { fixed[curt * 3 + 0] = 4 * target[curt * 3 + 0] - (target[(curt - wt) * 3 + 0] + target[(curt - 1) * 3 + 0] + target[(curt + wt) * 3 + 0] + target[(curt + 1) * 3 + 0]); fixed[curt * 3 + 1] = 4 * target[curt * 3 + 1] - (target[(curt - wt) * 3 + 1] + target[(curt - 1) * 3 + 1] + target[(curt + wt) * 3 + 1] + target[(curt + 1) * 3 + 1]); fixed[curt * 3 + 2] = 4 * target[curt * 3 + 2] - (target[(curt - wt) * 3 + 2] + target[(curt - 1) * 3 + 2] + target[(curt + wt) * 3 + 2] + target[(curt + 1) * 3 + 2]); } if (yt == 0 || mask[curt - wt] != 255.0f) { fixed[curt * 3 + 0] += background[(curb - wb) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb - wb) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb - wb) * 3 + 2]; } if (xt == 0 || mask[curt - 1] != 255.0f) { fixed[curt * 3 + 0] += background[(curb - 1) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb - 1) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb - 1) * 3 + 2]; } if (yt == (ht - 1) || mask[curt + wt] != 255.0f) { fixed[curt * 3 + 0] += background[(curb + wb) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb + wb) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb + wb) * 3 + 2]; } if (xt == (wt - 1) || mask[curt + 1] != 255.0f) { fixed[curt * 3 + 0] += background[(curb + 1) * 3 + 0]; fixed[curt * 3 + 1] += background[(curb + 1) * 3 + 1]; fixed[curt * 3 + 2] += background[(curb + 1) * 3 + 2]; } } } } __global__ void PoissonImageCloningIteration( const float *fixed, const float *mask, const float *buf1, float *buf2, const int wt, const int ht ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt + xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { buf2[curt * 3 + 0] = fixed[curt * 3 + 0]; buf2[curt * 3 + 1] = fixed[curt * 3 + 1]; buf2[curt * 3 + 2] = fixed[curt * 3 + 2]; if (yt != 0 && mask[curt - wt] == 255.0f) { buf2[curt * 3 + 0] 
+= buf1[(curt - wt) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt - wt) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt - wt) * 3 + 2]; } if (xt != 0 && mask[curt - 1] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt - 1) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt - 1) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt - 1) * 3 + 2]; } if (yt != (ht - 1) && mask[curt + wt] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt + wt) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt + wt) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt + wt) * 3 + 2]; } if (xt != (wt - 1) && mask[curt + 1] == 255.0f) { buf2[curt * 3 + 0] += buf1[(curt + 1) * 3 + 0]; buf2[curt * 3 + 1] += buf1[(curt + 1) * 3 + 1]; buf2[curt * 3 + 2] += buf1[(curt + 1) * 3 + 2]; } buf2[curt * 3 + 0] /= 4; buf2[curt * 3 + 1] /= 4; buf2[curt * 3 + 2] /= 4; } } __global__ void ImageShrinking( const float *src, float *dst, const int ws, const int hs ) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < ((hs + 1) / 2) and x < ((ws + 1) / 2)) { dst[(((ws + 1) /2)*y + x) * 3 + 0] = src[(ws*(2*y) + (2*x)) * 3 + 0]; dst[(((ws + 1) / 2)*y + x) * 3 + 1] = src[(ws*(2 * y) + (2 * x)) * 3 + 1]; dst[(((ws + 1) / 2)*y + x) * 3 + 2] = src[(ws*(2 * y) + (2 * x)) * 3 + 2]; } } __global__ void ImageShrinkingMask( const float *src, float *dst, const int ws, const int hs ) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y < ((hs + 1) / 2) and x < ((ws + 1) / 2)) { dst[((ws + 1) / 2)*y + x] = src[ws*(2 * y) + (2 * x)]; } } __global__ void ImageUpsample( const float *src, float *dst, const int wd, const int hd ) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; if (y * 2 < hd and x * 2 < wd) { dst[((wd *(y * 2)) + (x * 2)) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2)) + (x * 2)) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2)) + (x * 2)) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; if ((x * 2 + 1) < wd) { dst[((wd *(y * 2)) + (x * 2) + 1) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2)) + (x * 2) + 1) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2)) + (x * 2) + 1) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; } if ((y * 2 + 1) < hd) { dst[((wd *(y * 2 + 1)) + (x * 2)) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2 + 1)) + (x * 2)) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2 + 1)) + (x * 2)) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; } if ((x * 2 + 1) < wd and (y * 2 + 1) < hd) { dst[((wd *(y * 2 + 1)) + (x * 2) + 1) * 3 + 0] = src[(((wd + 1) / 2) * y + x) * 3 + 0]; dst[((wd *(y * 2 + 1)) + (x * 2) + 1) * 3 + 1] = src[(((wd + 1) / 2) * y + x) * 3 + 1]; dst[((wd *(y * 2 + 1)) + (x * 2) + 1) * 3 + 2] = src[(((wd + 1) / 2) * y + x) * 3 + 2]; } } } void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { dim3 gdim(CeilDiv(wt, 32), CeilDiv(ht, 16)), bdim(32, 16); //set up /* float *fixed; float *buf1, *buf2; cudaMalloc(&fixed, 3 * wt*ht*sizeof(float)); cudaMalloc(&buf1, 3 * wt*ht*sizeof(float)); cudaMalloc(&buf2, 3 * wt*ht*sizeof(float)); //initialize the iteration CalculateFixed <<< gdim, bdim >>>( background, target, mask, fixed, wb, hb, wt, ht, oy, ox ); cudaMemcpy(buf1, target, sizeof(float) * 3 * wt*ht, cudaMemcpyDeviceToDevice); 
//iterate //Original for (int i = 0; i < 10000; ++i) { PoissonImageCloningIteration <<<gdim, bdim >>>( fixed, mask, buf1, buf2, wt, ht ); PoissonImageCloningIteration <<<gdim, bdim >>>( fixed, mask, buf2, buf1, wt, ht ); }*/ //Hierachical //declare float *background_hier[4]; float *fixed_hier[4]; float *mask_hier[4]; float *buf1_hier[4]; float *buf2_hier[4]; int wbs[4]; int hbs[4]; int ws[4]; int hs[4]; for (int i = 0; i < 4; i++) { if (i == 0) { ws[i] = wt; hs[i] = ht; wbs[i] = wb; hbs[i] = hb; } else { ws[i] = (ws[i-1] + 1) / 2; hs[i] = (hs[i-1] + 1) / 2; wbs[i] = (wbs[i-1] + 1) / 2; hbs[i] = (hbs[i-1] + 1) / 2; } } //Malloc for (int i = 0; i < 4; i++) { cudaMalloc(&background_hier[i], 3 * wbs[i] * hbs[i] * sizeof(float)); cudaMalloc(&fixed_hier[i], 3 * ws[i] * hs[i] * sizeof(float)); cudaMalloc(&mask_hier[i], ws[i] * hs[i] * sizeof(float)); cudaMalloc(&buf1_hier[i], 3 * ws[i] * hs[i] * sizeof(float)); cudaMalloc(&buf2_hier[i], 3 * ws[i] * hs[i] * sizeof(float)); } //initialize cudaMemcpy(background_hier[0], background, sizeof(float) * 3 * wb * hb, cudaMemcpyDeviceToDevice); cudaMemcpy(mask_hier[0], mask, sizeof(float) * wt*ht, cudaMemcpyDeviceToDevice); cudaMemcpy(buf1_hier[0], target, sizeof(float) * 3 * wt*ht, cudaMemcpyDeviceToDevice); for (int i = 1; i < 4; i++) { ImageShrinking << < dim3(CeilDiv(wbs[i], 32), CeilDiv(hbs[i], 16)), dim3(32, 16) >> > (background_hier[i-1], background_hier[i], wbs[i-1], hbs[i-1]); ImageShrinkingMask << < dim3(CeilDiv(ws[i], 32), CeilDiv(hs[i], 16)), dim3(32, 16) >> > (mask_hier[i-1], mask_hier[i], ws[i-1], hs[i-1]); ImageShrinking << < dim3(CeilDiv(ws[i], 32), CeilDiv(hs[i], 16)), dim3(32, 16) >> > (buf1_hier[i-1], buf1_hier[i], ws[i-1], hs[i-1]); } //fixed for (int i = 0; i < 4; i++) { CalculateFixed << < dim3(CeilDiv(ws[i], 32), CeilDiv(hs[i], 16)), dim3(32, 16) >> >( background_hier[i], buf1_hier[i], mask_hier[i], fixed_hier[i], wbs[i], hbs[i], ws[i], hs[i], (oy / pow(2, i)), (ox / pow(2, i)) ); } //iteration for (int i = 0; i < 4; ++i) { for (int j = 0; j < 500; ++j) { PoissonImageCloningIteration << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> >( fixed_hier[3 - i], mask_hier[3 - i], buf1_hier[3 - i], buf2_hier[3 - i], ws[3 - i], hs[3 - i] ); PoissonImageCloningIteration << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> >( fixed_hier[3 - i], mask_hier[3 - i], buf2_hier[3 - i], buf1_hier[3 - i], ws[3 - i], hs[3 - i] ); } if (i < 3) { ImageUpsample << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> > (buf1_hier[3 - i], buf1_hier[2 - i], ws[2 - i], hs[2 - i]); ImageUpsample << < dim3(CeilDiv(ws[3 - i], 32), CeilDiv(hs[3 - i], 16)), dim3(32, 16) >> > (buf2_hier[3 - i], buf2_hier[2 - i], ws[2 - i], hs[2 - i]); } } //copy the image back cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice); /* SimpleClone <<< gdim, bdim >>>( background, buf1, mask, output, wb, hb, wt, ht, oy, ox );*/ SimpleClone << < gdim, bdim >> >( background, buf1_hier[0], mask, output, wb, hb, wt, ht, oy, ox ); //clean up /* cudaFree(fixed); cudaFree(buf1); cudaFree(buf2);*/ for (int i = 0; i < 4; i++) { cudaFree(fixed_hier[i]); cudaFree(mask_hier[i]); cudaFree(buf1_hier[i]); cudaFree(buf2_hier[i]); } }
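// A single-channel, host-side sketch of the Jacobi step that
// PoissonImageCloningIteration above performs per RGB channel: boundary and
// gradient terms are pre-folded into `fixed` by CalculateFixed, interior
// neighbours come from the previous iterate, and the sum is divided by 4.
// Function and buffer names are illustrative; the real kernels operate on
// interleaved RGB buffers on the GPU.
#include <vector>

void jacobi_step(const std::vector<float>& fixed, const std::vector<float>& mask,
                 const std::vector<float>& prev, std::vector<float>& next,
                 int wt, int ht) {
    for (int y = 0; y < ht; ++y) {
        for (int x = 0; x < wt; ++x) {
            const int c = y * wt + x;
            if (mask[c] <= 127.0f) continue;              // solve only inside the mask
            float v = fixed[c];
            if (y > 0      && mask[c - wt] == 255.0f) v += prev[c - wt];
            if (x > 0      && mask[c - 1]  == 255.0f) v += prev[c - 1];
            if (y < ht - 1 && mask[c + wt] == 255.0f) v += prev[c + wt];
            if (x < wt - 1 && mask[c + 1]  == 255.0f) v += prev[c + 1];
            next[c] = v * 0.25f;                          // 4-neighbour Laplacian stencil
        }
    }
}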
9cb954e8bf52fe14dd0cb717b3ec9247cae94d72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void NodesApplyTramplingEffectKernel(float* target, float* distanceToPath, int graphW, int graphH, float pathThickness, float tramplingCoefficient) { int i = 1 + blockIdx.x * blockDim.x + threadIdx.x; int j = 1 + blockIdx.y * blockDim.y + threadIdx.y; if(i <= graphW && j <= graphH) { int index = i + j * (graphW + 2); float t = distanceToPath[index]; t = max(0.0f, min(1.0f, fabsf(t / pathThickness))); t = t * (t * (-4 * t + 6) - 3) + 1; // cubic parabola atomicAdd(&target[index], t * tramplingCoefficient); } }
9cb954e8bf52fe14dd0cb717b3ec9247cae94d72.cu
#include "includes.h" __global__ void NodesApplyTramplingEffectKernel(float* target, float* distanceToPath, int graphW, int graphH, float pathThickness, float tramplingCoefficient) { int i = 1 + blockIdx.x * blockDim.x + threadIdx.x; int j = 1 + blockIdx.y * blockDim.y + threadIdx.y; if(i <= graphW && j <= graphH) { int index = i + j * (graphW + 2); float t = distanceToPath[index]; t = max(0.0f, min(1.0f, fabsf(t / pathThickness))); t = t * (t * (-4 * t + 6) - 3) + 1; // cubic parabola atomicAdd(&target[index], t * tramplingCoefficient); } }
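// A small host-side check of the falloff curve used by the trampling kernel
// above: f(t) = t*(t*(-4t + 6) - 3) + 1 maps t = 0 (on the path) to 1 and
// t = 1 (one path-thickness away) to 0, so the atomically added trampling
// weight fades with distance before being scaled by tramplingCoefficient.
// The helper below is illustrative; the printed values are hand-checkable.
#include <cstdio>

static float falloff(float t) {
    return t * (t * (-4.0f * t + 6.0f) - 3.0f) + 1.0f;
}

int main() {
    // f(0) = 1, f(0.5) = 0.5, f(1) = 0
    std::printf("%g %g %g\n", falloff(0.0f), falloff(0.5f), falloff(1.0f));
    return 0;
}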
a4d717a20fdfaf6576ce67da563f022676d805af.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 2 #define TW 6 #define TC 32 #define C 160 #define N 96 #define H 28 #define W 28 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[1920]; __shared__ float kernel_shared[1152]; float pad_temp_shared_local[64]; float kernel_shared_local[48]; compute_local[(0)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 10; ++rc_outer) { __syncthreads(); pad_temp_shared[(((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 35) / 120) * 784)) + (((int)blockIdx.y) * 56)) + ((((((int)threadIdx.x) * 35) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 1))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 1) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 1) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 2))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 2) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 2) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 3))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 3) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 3) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 4))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 4) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 4) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 5))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 5) % 30))) && ((((((int)threadIdx.x) * 35) + 5) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 5) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 5) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 5) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 6))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 6) % 30))) && ((((((int)threadIdx.x) * 35) + 6) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 6) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 6) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 6) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 7))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 7) % 30))) && ((((((int)threadIdx.x) * 35) + 7) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 7) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 7) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 7) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 8))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 8) % 30))) && ((((((int)threadIdx.x) * 35) + 8) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 8) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 8) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 8) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 9))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 9) % 30))) && ((((((int)threadIdx.x) * 35) + 9) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 9) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 9) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 9) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 10))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 10) % 30))) && ((((((int)threadIdx.x) * 35) + 10) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 10) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 10) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 10) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 11))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 11) % 30))) && ((((((int)threadIdx.x) * 35) + 11) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 11) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 11) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 11) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 12))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 12) % 30))) && ((((((int)threadIdx.x) * 35) + 12) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 12) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 12) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 12) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 13))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 13) % 30))) && ((((((int)threadIdx.x) * 35) + 13) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 13) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 13) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 13) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 14))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 14) % 30))) && ((((((int)threadIdx.x) * 35) + 14) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 14) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 14) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 14) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 15))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 15) % 30))) && ((((((int)threadIdx.x) * 35) + 15) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 15) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 15) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 15) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 16))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 16) % 30))) && ((((((int)threadIdx.x) * 35) + 16) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 16) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 16) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 16) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 17))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 17) % 30))) && ((((((int)threadIdx.x) * 35) + 17) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 17) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 17) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 17) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 18))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 18) % 30))) && ((((((int)threadIdx.x) * 35) + 18) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 18) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 18) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 18) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 19))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 19) % 30))) && ((((((int)threadIdx.x) * 35) + 19) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 19) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 19) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 19) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 20))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 20) % 30))) && ((((((int)threadIdx.x) * 35) + 20) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 20) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 20) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 20) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 21))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 21) % 30))) && ((((((int)threadIdx.x) * 35) + 21) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 21) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 21) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 21) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 22))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 22) % 30))) && ((((((int)threadIdx.x) * 35) + 22) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 22) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 22) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 22) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 23))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 23) % 30))) && ((((((int)threadIdx.x) * 35) + 23) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 23) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 23) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 23) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 24))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 24) % 30))) && ((((((int)threadIdx.x) * 35) + 24) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 24) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 24) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 24) % 30)) - 29))] : 0.000000e+00f); if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 25) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 25) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1895) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 25))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 25) % 30))) && ((((((int)threadIdx.x) * 35) + 25) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 25) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 25) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 25) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 26) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 26) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1894) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 26))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 26) % 30))) && ((((((int)threadIdx.x) * 35) + 26) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 26) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 26) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 26) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 27) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 27) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1893) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 27))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 27) % 30))) && ((((((int)threadIdx.x) * 35) + 27) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 27) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 27) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 27) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 28) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 28) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1892) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 28))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 28) % 30))) && ((((((int)threadIdx.x) * 35) + 28) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 28) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 28) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 28) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 29) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 29) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1891) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 29))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 29) % 30))) && ((((((int)threadIdx.x) * 35) + 29) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 29) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 29) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 29) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 30) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 35) / 30)) < 63) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1890) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 30))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 30) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 30) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 31) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 31) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1889) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 31))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 31) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 31) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 32) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 32) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1888) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 32))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 32) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 32) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 33) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 33) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1887) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 33))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 33) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 33) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 34) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 34) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1886) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 34))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 34) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 34) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f); } } } } kernel_shared[(((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 1))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 2))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 3))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 4))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 5))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 6))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 7))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 8))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 9))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 10))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 11))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 
2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 12))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 13))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 14))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 2))]; if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1137) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 15))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1136) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 16))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 1))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1135) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 17))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 2))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1134) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 18))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) { if (((((int)threadIdx.z) * 288) + 
(((int)threadIdx.x) * 21)) < 1133) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 19))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 1))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1132) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 20))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 2))]; } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) { for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) { pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)))]; pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 1))]; pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 2))]; pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 3))]; pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 30))]; pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 31))]; pad_temp_shared_local[(6)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 32))]; pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 33))]; pad_temp_shared_local[(8)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 120))]; pad_temp_shared_local[(9)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 121))]; pad_temp_shared_local[(10)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 122))]; pad_temp_shared_local[(11)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 123))]; pad_temp_shared_local[(12)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 150))]; pad_temp_shared_local[(13)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 151))]; pad_temp_shared_local[(14)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 152))]; pad_temp_shared_local[(15)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 153))]; pad_temp_shared_local[(16)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 240))]; pad_temp_shared_local[(17)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 241))]; 
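// Register tile: pad_temp_shared_local caches 8 input channels x 2 rows x 4 columns (64 values) of the shared input tile for this (rc_inner_outer, ry_inner_outer) step.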
pad_temp_shared_local[(18)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 242))]; pad_temp_shared_local[(19)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 243))]; pad_temp_shared_local[(20)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 270))]; pad_temp_shared_local[(21)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 271))]; pad_temp_shared_local[(22)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 272))]; pad_temp_shared_local[(23)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 273))]; pad_temp_shared_local[(24)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 360))]; pad_temp_shared_local[(25)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 361))]; pad_temp_shared_local[(26)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 362))]; pad_temp_shared_local[(27)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 363))]; pad_temp_shared_local[(28)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 390))]; pad_temp_shared_local[(29)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 391))]; pad_temp_shared_local[(30)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 392))]; pad_temp_shared_local[(31)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 393))]; pad_temp_shared_local[(32)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 480))]; pad_temp_shared_local[(33)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 481))]; pad_temp_shared_local[(34)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 482))]; pad_temp_shared_local[(35)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 483))]; pad_temp_shared_local[(36)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 510))]; pad_temp_shared_local[(37)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 511))]; pad_temp_shared_local[(38)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 512))]; pad_temp_shared_local[(39)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 513))]; pad_temp_shared_local[(40)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 600))]; pad_temp_shared_local[(41)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 601))]; pad_temp_shared_local[(42)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 602))]; pad_temp_shared_local[(43)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 603))]; 
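// The matching weight taps are cached next: kernel_shared_local[0..23] serve the first output filter handled by this thread, [24..47] the second (offset +576 in kernel_shared).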
pad_temp_shared_local[(44)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 630))]; pad_temp_shared_local[(45)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 631))]; pad_temp_shared_local[(46)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 632))]; pad_temp_shared_local[(47)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 633))]; pad_temp_shared_local[(48)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 720))]; pad_temp_shared_local[(49)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 721))]; pad_temp_shared_local[(50)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 722))]; pad_temp_shared_local[(51)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 723))]; pad_temp_shared_local[(52)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 750))]; pad_temp_shared_local[(53)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 751))]; pad_temp_shared_local[(54)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 752))]; pad_temp_shared_local[(55)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 753))]; pad_temp_shared_local[(56)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 840))]; pad_temp_shared_local[(57)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 841))]; pad_temp_shared_local[(58)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 842))]; pad_temp_shared_local[(59)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 843))]; pad_temp_shared_local[(60)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 870))]; pad_temp_shared_local[(61)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 871))]; pad_temp_shared_local[(62)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 872))]; pad_temp_shared_local[(63)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 873))]; kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)))]; kernel_shared_local[(24)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 576))]; kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 1))]; kernel_shared_local[(25)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 577))]; kernel_shared_local[(2)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 2))]; kernel_shared_local[(26)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 578))]; kernel_shared_local[(3)] = 
kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 9))]; kernel_shared_local[(27)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 585))]; kernel_shared_local[(4)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 10))]; kernel_shared_local[(28)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 586))]; kernel_shared_local[(5)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 11))]; kernel_shared_local[(29)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 587))]; kernel_shared_local[(6)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 18))]; kernel_shared_local[(30)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 594))]; kernel_shared_local[(7)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 19))]; kernel_shared_local[(31)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 595))]; kernel_shared_local[(8)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 20))]; kernel_shared_local[(32)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 596))]; kernel_shared_local[(9)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 27))]; kernel_shared_local[(33)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 603))]; kernel_shared_local[(10)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 28))]; kernel_shared_local[(34)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 604))]; kernel_shared_local[(11)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 29))]; kernel_shared_local[(35)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 605))]; kernel_shared_local[(12)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 36))]; kernel_shared_local[(36)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 612))]; kernel_shared_local[(13)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 37))]; kernel_shared_local[(37)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 613))]; kernel_shared_local[(14)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 38))]; kernel_shared_local[(38)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 614))]; kernel_shared_local[(15)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 45))]; kernel_shared_local[(39)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 621))]; kernel_shared_local[(16)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 46))]; kernel_shared_local[(40)] = 
kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 622))]; kernel_shared_local[(17)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 47))]; kernel_shared_local[(41)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 623))]; kernel_shared_local[(18)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 54))]; kernel_shared_local[(42)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 630))]; kernel_shared_local[(19)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 55))]; kernel_shared_local[(43)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 631))]; kernel_shared_local[(20)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 56))]; kernel_shared_local[(44)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 632))]; kernel_shared_local[(21)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 63))]; kernel_shared_local[(45)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 639))]; kernel_shared_local[(22)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 64))]; kernel_shared_local[(46)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 640))]; kernel_shared_local[(23)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 65))]; kernel_shared_local[(47)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 641))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(24)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(24)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(24)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(24)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(25)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(25)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(25)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(6)] * 
kernel_shared_local[(25)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(26)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(26)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(2)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(26)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(26)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(27)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(3)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(27)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(27)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(27)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(28)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(4)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(28)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(4)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(28)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(4)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(28)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(5)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(29)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(5)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(29)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(5)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(29)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(5)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(29)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(6)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(30)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(17)] * kernel_shared_local[(6)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(30)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(6)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(30)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(6)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(30)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(7)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(31)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(7)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(31)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(7)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(31)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(7)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(31)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(8)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(32)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(8)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(32)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(8)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(32)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(8)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(32)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(9)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(33)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(9)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(33)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(9)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(33)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(9)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(33)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(10)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(34)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(10)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(34)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(10)])); 
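// Multiply-accumulate continues: each of the 8 partial sums (2 filters x 2x2 outputs per thread) accumulates all 24 cached taps.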
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(34)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(10)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(34)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(11)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(35)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(11)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(35)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(11)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(35)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(11)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(35)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(12)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(36)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(12)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(36)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(12)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(36)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(12)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(36)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(13)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(37)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(13)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(37)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(13)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(37)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(13)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(37)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(14)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(38)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(14)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(38)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(14)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(38)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(39)] * kernel_shared_local[(14)])); compute_local[(7)] = (compute_local[(7)] + 
(pad_temp_shared_local[(39)] * kernel_shared_local[(38)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(15)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(39)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(15)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(39)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(15)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(39)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(15)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(39)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(16)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(40)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(16)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(40)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(16)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(40)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(16)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(40)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(17)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(41)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(17)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(41)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(17)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(41)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(17)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(41)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(18)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(42)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(18)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(42)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(18)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(42)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(18)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(42)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(19)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(49)] * 
kernel_shared_local[(43)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(19)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(43)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(19)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(43)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(19)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(43)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(20)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(44)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(20)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(44)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(20)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(44)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(20)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(44)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(21)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(45)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(21)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(45)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(21)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(45)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(21)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(45)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(22)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(46)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(22)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(46)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(22)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(46)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(22)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(46)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(23)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(47)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(23)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(47)])); compute_local[(2)] = 
(compute_local[(2)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(23)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(47)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(23)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(47)])); } } } compute[(((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3136))] = compute_local[(4)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3137))] = compute_local[(5)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 28))] = compute_local[(2)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3164))] = compute_local[(6)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 29))] = compute_local[(3)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3165))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, 
&workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; 
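  // Remaining members (declared below): the convolution descriptor, the device output
  // buffer, and the device kernel weights. As in the GEMM and Winograd wrappers,
  // initialize() builds the cuDNN descriptors and uploads an all-ones kernel, and
  // forward() runs CUDNN_CONVOLUTION_FWD_ALGO_FFT into the preallocated output buffer.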
cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned 
int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 5: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 5; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 6: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 6; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; case 2: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 5: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 5; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 6: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 6; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, 
h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 
3]*data_array[5]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[0]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[0]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[1]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[4]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[1]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[3]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW 
+ 4]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[3]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[4]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[4]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[6]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[6]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[7]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[7]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 7]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); 
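  // Benchmark flow for this layer configuration: fill the input with random values,
  // run the three cuDNN reference paths (implicit GEMM, non-fused Winograd, FFT),
  // then time the TVM-generated default_function_kernel0 and the shared-memory tiled
  // conv2d kernel, and finally append the raw timings and relative speedups to the CSV.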
srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; hipMalloc(&device_input,C*H*W*sizeof(float)); hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; hipEvent_t event_start; hipEvent_t event_stop; hipEventCreate(&event_start); hipEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; hipMalloc(&device_out,H*W*N*sizeof(float)); hipMemset(device_out,0,H*W*N*sizeof(float)); hipMalloc(&device_K,C*N*9*sizeof(float)); hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice); hipEventRecord(event_start); convGemm.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnGemmTime; hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop); hipEventRecord(event_start); convWinogradeNon.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnWinogradeTimeNon; hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); hipEventRecord(event_start); convFFT.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnFFTTime; hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(1,14,12); dim3 block(14,1,4); hipEventRecord(event_start); hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tvm; hipEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); hipMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); hipEventRecord(event_start); hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tdc; hipEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_tvm, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
a4d717a20fdfaf6576ce67da563f022676d805af.cu
#include <cudnn.h> #include <stdio.h> #include <cuda.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 2 #define TW 6 #define TC 32 #define C 160 #define N 96 #define H 28 #define W 28 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(cudaError_t code) { if (code != cudaSuccess) { std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[1920]; __shared__ float kernel_shared[1152]; float pad_temp_shared_local[64]; float kernel_shared_local[48]; compute_local[(0)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 10; ++rc_outer) { __syncthreads(); pad_temp_shared[(((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)))] = (((((1 <= ((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30))) && (((((int)blockIdx.y) * 2) + (((((int)threadIdx.x) * 35) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + (((((int)threadIdx.x) * 35) / 120) * 784)) + (((int)blockIdx.y) * 56)) + ((((((int)threadIdx.x) * 35) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 1))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 1) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 1) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 1) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 2))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 2) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 2) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 2) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 3))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 3) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 3) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 3) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 4))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 4) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 4) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 4) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 5))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 5) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 5) % 30))) && ((((((int)threadIdx.x) * 35) + 5) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 5) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 5) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 5) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 6))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 6) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 6) % 30))) && ((((((int)threadIdx.x) * 35) + 6) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 6) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 6) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 6) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 7))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 7) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 7) % 30))) && ((((((int)threadIdx.x) * 35) + 7) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 7) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 7) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 7) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 8))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 8) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 8) % 30))) && ((((((int)threadIdx.x) * 35) + 8) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 8) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 8) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 8) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 9))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 9) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 9) % 30))) && ((((((int)threadIdx.x) * 35) + 9) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 9) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 9) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 9) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 10))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 10) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 10) % 30))) && ((((((int)threadIdx.x) * 35) + 10) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 10) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 10) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 10) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 11))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 11) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 11) % 30))) && ((((((int)threadIdx.x) * 35) + 11) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 11) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 11) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 11) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 12))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 12) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 12) % 30))) && ((((((int)threadIdx.x) * 35) + 12) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 12) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 12) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 12) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 13))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 13) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 13) % 30))) && ((((((int)threadIdx.x) * 35) + 13) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 13) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 13) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 13) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 14))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 14) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 14) % 30))) && ((((((int)threadIdx.x) * 35) + 14) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 14) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 14) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 14) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 15))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 15) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 15) % 30))) && ((((((int)threadIdx.x) * 35) + 15) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 15) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 15) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 15) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 16))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 16) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 16) % 30))) && ((((((int)threadIdx.x) * 35) + 16) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 16) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 16) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 16) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 17))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 17) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 17) % 30))) && ((((((int)threadIdx.x) * 35) + 17) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 17) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 17) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 17) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 18))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 18) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 18) % 30))) && ((((((int)threadIdx.x) * 35) + 18) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 18) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 18) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 18) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 19))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 19) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 19) % 30))) && ((((((int)threadIdx.x) * 35) + 19) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 19) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 19) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 19) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 20))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 20) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 20) % 30))) && ((((((int)threadIdx.x) * 35) + 20) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 20) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 20) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 20) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 21))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 21) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 21) % 30))) && ((((((int)threadIdx.x) * 35) + 21) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 21) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 21) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 21) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 22))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 22) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 22) % 30))) && ((((((int)threadIdx.x) * 35) + 22) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 22) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 22) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 22) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 23))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 23) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 23) % 30))) && ((((((int)threadIdx.x) * 35) + 23) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 23) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 23) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 23) % 30)) - 29))] : 0.000000e+00f); pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 24))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 24) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 24) % 30))) && ((((((int)threadIdx.x) * 35) + 24) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 24) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 24) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 24) % 30)) - 29))] : 0.000000e+00f); if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 25) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 25) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1895) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 25))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 25) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 25) % 30))) && ((((((int)threadIdx.x) * 35) + 25) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 25) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 25) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 25) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 26) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 26) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1894) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 26))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 26) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 26) % 30))) && ((((((int)threadIdx.x) * 35) + 26) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 26) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 26) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 26) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 27) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 27) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1893) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 27))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 27) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 27) % 30))) && ((((((int)threadIdx.x) * 35) + 27) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 27) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 27) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 27) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 28) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 28) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1892) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 28))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 28) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 28) % 30))) && ((((((int)threadIdx.x) * 35) + 28) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 28) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 28) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 28) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 29) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 29) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1891) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 29))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 29) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 29) % 30))) && ((((((int)threadIdx.x) * 35) + 29) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 29) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 29) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 29) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 30) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + ((((int)threadIdx.x) * 35) / 30)) < 63) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1890) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 30))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 30) % 120) / 30)) < 29)) && (1 <= ((((int)threadIdx.x) * 35) % 30))) && (((((int)threadIdx.x) * 35) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 30) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 30) % 120) / 30) * 28)) + ((((int)threadIdx.x) * 35) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 31) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 31) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1889) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 31))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 31) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 1) % 30))) && ((((((int)threadIdx.x) * 35) + 1) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 31) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 31) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 1) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 32) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 32) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1888) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 32))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 32) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 2) % 30))) && ((((((int)threadIdx.x) * 35) + 2) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 32) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 32) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 2) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 33) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 33) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1887) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 33))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 33) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 3) % 30))) && ((((((int)threadIdx.x) * 35) + 3) % 30) < 29)) ? data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 33) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 33) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 3) % 30)) - 29))] : 0.000000e+00f); } } } } if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 35) + 34) / 120)) < 16) { if (((((int)threadIdx.z) * 16) + (((((int)threadIdx.x) * 35) + 34) / 30)) < 64) { if (((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) < 1886) { if (((int)threadIdx.x) < 13) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.x) * 35)) + 34))] = (((((1 <= ((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30))) && (((((int)blockIdx.y) * 2) + ((((((int)threadIdx.x) * 35) + 34) % 120) / 30)) < 29)) && (1 <= (((((int)threadIdx.x) * 35) + 4) % 30))) && ((((((int)threadIdx.x) * 35) + 4) % 30) < 29)) ? 
data[((((((((rc_outer * 12544) + (((int)threadIdx.z) * 3136)) + ((((((int)threadIdx.x) * 35) + 34) / 120) * 784)) + (((int)blockIdx.y) * 56)) + (((((((int)threadIdx.x) * 35) + 34) % 120) / 30) * 28)) + (((((int)threadIdx.x) * 35) + 4) % 30)) - 29))] : 0.000000e+00f); } } } } kernel_shared[(((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 1))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 2))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + (((((int)threadIdx.x) * 7) / 48) * 1440)) + (rc_outer * 144)) + (((((int)threadIdx.x) * 7) % 48) * 3)) + 2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 3))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 4))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 5))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 1) % 48) * 3)) + 2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 6))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 7))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 8))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 2) % 48) * 3)) + 2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 9))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 10))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 11))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 3) % 48) * 3)) + 
2))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 12))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 13))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 1))]; kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 14))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 4) % 48) * 3)) + 2))]; if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1137) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 15))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1136) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 16))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 1))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 128) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 379) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1135) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 17))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 5) % 48) * 3)) + 2))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1134) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 18))] = kernel[((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) { if (((((int)threadIdx.z) * 288) + 
(((int)threadIdx.x) * 21)) < 1133) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 19))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 1))]; } } } } } if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 8) { if (((((int)threadIdx.z) * 32) + ((((int)threadIdx.x) * 7) / 3)) < 126) { if (((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 7)) < 378) { if (((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) < 1132) { if (((int)threadIdx.x) < 13) { kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 21)) + 20))] = kernel[(((((((((int)blockIdx.z) * 11520) + (((int)threadIdx.z) * 2880)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 1440)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) + 6) % 48) * 3)) + 2))]; } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) { for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) { pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)))]; pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 1))]; pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 2))]; pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 3))]; pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 30))]; pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 31))]; pad_temp_shared_local[(6)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 32))]; pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 33))]; pad_temp_shared_local[(8)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 120))]; pad_temp_shared_local[(9)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 121))]; pad_temp_shared_local[(10)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 122))]; pad_temp_shared_local[(11)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 123))]; pad_temp_shared_local[(12)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 150))]; pad_temp_shared_local[(13)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 151))]; pad_temp_shared_local[(14)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 152))]; pad_temp_shared_local[(15)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 153))]; pad_temp_shared_local[(16)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 240))]; pad_temp_shared_local[(17)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 241))]; 
pad_temp_shared_local[(18)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 242))]; pad_temp_shared_local[(19)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 243))]; pad_temp_shared_local[(20)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 270))]; pad_temp_shared_local[(21)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 271))]; pad_temp_shared_local[(22)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 272))]; pad_temp_shared_local[(23)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 273))]; pad_temp_shared_local[(24)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 360))]; pad_temp_shared_local[(25)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 361))]; pad_temp_shared_local[(26)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 362))]; pad_temp_shared_local[(27)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 363))]; pad_temp_shared_local[(28)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 390))]; pad_temp_shared_local[(29)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 391))]; pad_temp_shared_local[(30)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 392))]; pad_temp_shared_local[(31)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 393))]; pad_temp_shared_local[(32)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 480))]; pad_temp_shared_local[(33)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 481))]; pad_temp_shared_local[(34)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 482))]; pad_temp_shared_local[(35)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 483))]; pad_temp_shared_local[(36)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 510))]; pad_temp_shared_local[(37)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 511))]; pad_temp_shared_local[(38)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 512))]; pad_temp_shared_local[(39)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 513))]; pad_temp_shared_local[(40)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 600))]; pad_temp_shared_local[(41)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 601))]; pad_temp_shared_local[(42)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 602))]; pad_temp_shared_local[(43)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 603))]; 
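          // [editor note, inferred from the generated schedule -- not part of the original source]
          // The register-staging loads above and below cache, per thread, a 2-row x 4-column
          // window of the padded 30-element input rows for each of the 8 input channels handled
          // in this rc_inner_outer iteration; these values feed the 2x2 output tile accumulated
          // into compute_local[0..7] further down.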
pad_temp_shared_local[(44)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 630))]; pad_temp_shared_local[(45)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 631))]; pad_temp_shared_local[(46)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 632))]; pad_temp_shared_local[(47)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 633))]; pad_temp_shared_local[(48)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 720))]; pad_temp_shared_local[(49)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 721))]; pad_temp_shared_local[(50)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 722))]; pad_temp_shared_local[(51)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 723))]; pad_temp_shared_local[(52)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 750))]; pad_temp_shared_local[(53)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 751))]; pad_temp_shared_local[(54)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 752))]; pad_temp_shared_local[(55)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 753))]; pad_temp_shared_local[(56)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 840))]; pad_temp_shared_local[(57)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 841))]; pad_temp_shared_local[(58)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 842))]; pad_temp_shared_local[(59)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 843))]; pad_temp_shared_local[(60)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 870))]; pad_temp_shared_local[(61)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 871))]; pad_temp_shared_local[(62)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 872))]; pad_temp_shared_local[(63)] = pad_temp_shared[(((((rc_inner_outer * 960) + (ry_inner_outer * 30)) + (((int)threadIdx.x) * 2)) + 873))]; kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)))]; kernel_shared_local[(24)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 576))]; kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 1))]; kernel_shared_local[(25)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 577))]; kernel_shared_local[(2)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 2))]; kernel_shared_local[(26)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 578))]; kernel_shared_local[(3)] = 
kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 9))]; kernel_shared_local[(27)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 585))]; kernel_shared_local[(4)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 10))]; kernel_shared_local[(28)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 586))]; kernel_shared_local[(5)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 11))]; kernel_shared_local[(29)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 587))]; kernel_shared_local[(6)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 18))]; kernel_shared_local[(30)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 594))]; kernel_shared_local[(7)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 19))]; kernel_shared_local[(31)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 595))]; kernel_shared_local[(8)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 20))]; kernel_shared_local[(32)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 596))]; kernel_shared_local[(9)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 27))]; kernel_shared_local[(33)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 603))]; kernel_shared_local[(10)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 28))]; kernel_shared_local[(34)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 604))]; kernel_shared_local[(11)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 29))]; kernel_shared_local[(35)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 605))]; kernel_shared_local[(12)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 36))]; kernel_shared_local[(36)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 612))]; kernel_shared_local[(13)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 37))]; kernel_shared_local[(37)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 613))]; kernel_shared_local[(14)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 38))]; kernel_shared_local[(38)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 614))]; kernel_shared_local[(15)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 45))]; kernel_shared_local[(39)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 621))]; kernel_shared_local[(16)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 46))]; kernel_shared_local[(40)] = 
kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 622))]; kernel_shared_local[(17)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 47))]; kernel_shared_local[(41)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 623))]; kernel_shared_local[(18)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 54))]; kernel_shared_local[(42)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 630))]; kernel_shared_local[(19)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 55))]; kernel_shared_local[(43)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 631))]; kernel_shared_local[(20)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 56))]; kernel_shared_local[(44)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 632))]; kernel_shared_local[(21)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 63))]; kernel_shared_local[(45)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 639))]; kernel_shared_local[(22)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 64))]; kernel_shared_local[(46)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 640))]; kernel_shared_local[(23)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 65))]; kernel_shared_local[(47)] = kernel_shared[(((((((int)threadIdx.z) * 144) + (rc_inner_outer * 72)) + (ry_inner_outer * 3)) + 641))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(24)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(24)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(24)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(24)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(25)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(25)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(25)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(6)] * 
kernel_shared_local[(25)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(26)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(26)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(2)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(26)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(26)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(27)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(3)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(27)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(27)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(27)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(28)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(4)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(28)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(4)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(28)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(4)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(28)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(5)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(29)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(5)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(29)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(5)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(29)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(5)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(29)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(6)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(30)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(17)] * kernel_shared_local[(6)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(30)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(6)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(30)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(6)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(30)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(7)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(31)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(7)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(31)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(7)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(31)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(7)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(31)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(8)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(32)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(8)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(32)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(8)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(32)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(8)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(32)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(9)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(33)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(9)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(33)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(9)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(33)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(9)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(33)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(10)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(34)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(10)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(34)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(10)])); 
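          // [editor note, inferred -- not part of the original source] The unrolled
          // multiply-accumulate statements in this block apply one 3-tap kernel row
          // (ry_inner_outer) across the 8 staged input channels: compute_local[0..3]
          // accumulate a 2x2 output tile for the first output channel owned by this thread
          // (weights kernel_shared_local[0..23]), while compute_local[4..7] accumulate the
          // same tile for the second output channel (weights kernel_shared_local[24..47]).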
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(34)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(10)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(34)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(11)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(35)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(11)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(35)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(11)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(35)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(11)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(35)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(12)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(32)] * kernel_shared_local[(36)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(12)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(36)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(12)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(36)] * kernel_shared_local[(36)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(12)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(36)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(13)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(33)] * kernel_shared_local[(37)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(13)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(37)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(13)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(37)] * kernel_shared_local[(37)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(13)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(37)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(14)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(34)] * kernel_shared_local[(38)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(14)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(35)] * kernel_shared_local[(38)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(14)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(38)] * kernel_shared_local[(38)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(39)] * kernel_shared_local[(14)])); compute_local[(7)] = (compute_local[(7)] + 
(pad_temp_shared_local[(39)] * kernel_shared_local[(38)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(15)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(40)] * kernel_shared_local[(39)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(15)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(39)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(15)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(44)] * kernel_shared_local[(39)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(15)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(39)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(16)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(41)] * kernel_shared_local[(40)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(16)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(40)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(16)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(45)] * kernel_shared_local[(40)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(16)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(40)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(17)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(42)] * kernel_shared_local[(41)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(17)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(43)] * kernel_shared_local[(41)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(17)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(46)] * kernel_shared_local[(41)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(17)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(47)] * kernel_shared_local[(41)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(18)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(48)] * kernel_shared_local[(42)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(18)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(42)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(18)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(52)] * kernel_shared_local[(42)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(18)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(42)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(49)] * kernel_shared_local[(19)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(49)] * 
kernel_shared_local[(43)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(19)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(43)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(19)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(53)] * kernel_shared_local[(43)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(19)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(43)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(20)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(50)] * kernel_shared_local[(44)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(20)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(51)] * kernel_shared_local[(44)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(20)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(54)] * kernel_shared_local[(44)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(20)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(55)] * kernel_shared_local[(44)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(21)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(56)] * kernel_shared_local[(45)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(21)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(45)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(21)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(60)] * kernel_shared_local[(45)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(21)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(45)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(22)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(57)] * kernel_shared_local[(46)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(22)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(46)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(22)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(61)] * kernel_shared_local[(46)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(22)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(46)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(23)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(58)] * kernel_shared_local[(47)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(23)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(59)] * kernel_shared_local[(47)])); compute_local[(2)] = 
(compute_local[(2)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(23)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(62)] * kernel_shared_local[(47)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(23)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(63)] * kernel_shared_local[(47)])); } } } compute[(((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3136))] = compute_local[(4)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3137))] = compute_local[(5)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 28))] = compute_local[(2)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3164))] = compute_local[(6)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 29))] = compute_local[(3)]; compute[((((((((int)blockIdx.z) * 6272) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 56)) + (((int)threadIdx.x) * 2)) + 3165))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, 
&workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; 
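  // [editor note -- not part of the original source] This ConvFFT wrapper mirrors ConvGemm and
  // ConvWinogradeNon above; its initialize() and forward() differ only in selecting
  // CUDNN_CONVOLUTION_FWD_ALGO_FFT instead of the implicit-GEMM / Winograd non-fused algorithms.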
cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for 
(unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 5: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 5; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 6: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 6; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; case 2: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 5: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 5; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 6: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 6; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, 
h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[2]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * 
TW + 3]*data_array[5]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[0]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[0]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[1]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[4]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[1]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[2]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[5]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[3]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id 
* TW + 4]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[3]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[4]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[4]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[5]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[8]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[6]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[6]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[7]; temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[7]; temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[8]; temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 7]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); 
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; cudaMalloc(&device_input,C*H*W*sizeof(float)); cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; cudaEvent_t event_start; cudaEvent_t event_stop; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; cudaMalloc(&device_out,H*W*N*sizeof(float)); cudaMemset(device_out,0,H*W*N*sizeof(float)); cudaMalloc(&device_K,C*N*9*sizeof(float)); cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(event_start); convGemm.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnGemmTime; cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop); cudaEventRecord(event_start); convWinogradeNon.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnWinogradeTimeNon; cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); cudaEventRecord(event_start); convFFT.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnFFTTime; cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(1,14,12); dim3 block(14,1,4); cudaEventRecord(event_start); default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tvm; cudaEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); cudaMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); cudaEventRecord(event_start); conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tdc; cudaEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_tvm, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
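Every unrolled temp_result update in the conv2d kernel above follows one index pattern: output element (th, tw) accumulates tile row th + r, column tw_id * TW + tw + s, times filter tap r * 3 + s. A minimal host-side sketch of the equivalent loop nest, assuming a 3x3 filter and a 2 x 6 output tile as the unrolled indices suggest (the names here are illustrative, not taken from the file):

// The generic loop nest that the fully unrolled accumulation in conv2d specializes,
// for one input channel of one tile. Constants are inferred from the unrolled indices.
void accumulate_tile_ref(const float* tile,   // (tile_h + 2) rows x pitch columns of shared input
                         const float* filt,   // 3x3 taps, row-major (the data_array role)
                         float* acc,          // tile_h * tile_w partial sums (the temp_result role)
                         int tile_h, int tile_w, int pitch, int col_off) {
    for (int r = 0; r < 3; ++r)
        for (int s = 0; s < 3; ++s)
            for (int th = 0; th < tile_h; ++th)
                for (int tw = 0; tw < tile_w; ++tw)
                    acc[th * tile_w + tw] +=
                        tile[(th + r) * pitch + col_off + tw + s] * filt[r * 3 + s];
}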
71aec6b920902b740e0ab1472546134c978078de.hip
// !!! This is a file automatically generated by hipify!!! /////////////////////////////////////////////////////////////////////////////// // // The MIT License // // Copyright (c) 2006 Scientific Computing and Imaging Institute, // University of Utah (USA) // // License for the specific language governing rights and limitations under // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // /////////////////////////////////////////////////////////////////////////////// #ifndef ELVIS_CYLINDER_CU #define ELVIS_CYLINDER_CU #include <optix_cuda.h> #include <optix_math.h> #include <optixu/optixu_matrix.h> #include <optixu/optixu_aabb.h> #include "CutSurfacePayloads.cu" #include "util.hip" #include <ElVis/Core/OptixVariables.cu> // Intersection program for cannonical cylinder of height 1 and centered // at the origin. We expect users to provide transformation nodes // to rotate and resize the cylinder as needed. RT_PROGRAM void CylinderIntersect( int primIdx ) { ElVisFloat3 d = MakeFloat3(ray.direction); ElVisFloat3 o = MakeFloat3(ray.origin); ElVisFloat A = d.x*d.x + d.y*d.y; ElVisFloat B = MAKE_FLOAT(2.0)*(o.x*d.x + o.y*d.y); ElVisFloat C = o.x*o.x + o.y*o.y - MAKE_FLOAT(1.0); ElVisFloat D = B*B - MAKE_FLOAT(4.0)*A*C; if( D < MAKE_FLOAT(0.0) ) { return; } // In this case we know that there is at least 1 intersection. ElVisFloat denom = MAKE_FLOAT(2.0) * A; ElVisFloat square_D = Sqrtf(D); // Of the two roots, this is the one which is closest to the viewer. ElVisFloat t1 = (-B - square_D)/denom; if( t1 > MAKE_FLOAT(0.0) ) { const ElVisFloat3 intersectionPoint = o + t1 * d; if( intersectionPoint.z >= MAKE_FLOAT(0.0) && intersectionPoint.z <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t1 ) ) { normal = MakeFloat3(intersectionPoint.x, intersectionPoint.y, MAKE_FLOAT(0.0)); normalize(normal); rtReportIntersection(0); } } } // TODO - Uncommenting the rest of this methods causes failure. On 1/18/2011 I postponed this so I could finish // some timing tests, but it needs to be addressed. 
ElVisFloat t2 = (-B + square_D)/denom; if( t2 > MAKE_FLOAT(0.0) ) { const ElVisFloat3 intersectionPoint = o + t2 * d; if( intersectionPoint.z >= MAKE_FLOAT(0.0) && intersectionPoint.z <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t2 ) ) { // Uncomment the following line for error in Cuda 3.0 and Optix 2.0 and sm_20 // Cuda 3.0 Optix 2.0 sm_20 - x // Cuda 3.0 Optix 2.0 sm_13 - x // Cuda 3.0 Optix 2.1 sm_20 - Works // Cuda 3.0 Optix 2.1 sm_13 // Cuda 3.1 Optix 2.1 sm_20 // Cuda 3.1 Optix 2.1 sm_13 // Cuda 3.2 Optix 2.1 sm_20 // Cuda 3.2 Optix 2.1 sm_13 // normal = MakeFloat3(intersectionPoint.x, intersectionPoint.y, MAKE_FLOAT(0.0)); normalize(normal); rtReportIntersection(0); } } } ElVisFloat4 cap0 = MakeFloat4(MAKE_FLOAT(0.0), MAKE_FLOAT(0.0), MAKE_FLOAT(-1.0), MAKE_FLOAT(0.0)); ElVisFloat t3; if( FindPlaneIntersection(o, d, cap0, t3) ) { const ElVisFloat3 intersectionPoint = o + t3 * d; if( intersectionPoint.x*intersectionPoint.x + intersectionPoint.y*intersectionPoint.y <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t3 ) ) { normal = MakeFloat3(cap0.x, cap0.y, cap0.z); rtReportIntersection(0); } } } ElVisFloat4 cap1 = MakeFloat4(MAKE_FLOAT(0.0), MAKE_FLOAT(0.0), MAKE_FLOAT(1.0), MAKE_FLOAT(-1.0) ); ElVisFloat t4; if( FindPlaneIntersection(o, d, cap1, t4) ) { const ElVisFloat3 intersectionPoint = o + t4 * d; if( intersectionPoint.x*intersectionPoint.x + intersectionPoint.y*intersectionPoint.y <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t4 ) ) { normal = MakeFloat3(cap1.x, cap1.y, cap1.z); rtReportIntersection(0); } } } } // Bounding box for the cannonical cylinder. RT_PROGRAM void CylinderBounding (int, float result[6]) { optix::Aabb* aabb = (optix::Aabb*)result; aabb->m_min = make_float3(-1.0f, -1.0f, -1.0f); aabb->m_max = make_float3(1.0f, 1.0f, 1.0f); } #endif
71aec6b920902b740e0ab1472546134c978078de.cu
/////////////////////////////////////////////////////////////////////////////// // // The MIT License // // Copyright (c) 2006 Scientific Computing and Imaging Institute, // University of Utah (USA) // // License for the specific language governing rights and limitations under // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // /////////////////////////////////////////////////////////////////////////////// #ifndef ELVIS_CYLINDER_CU #define ELVIS_CYLINDER_CU #include <optix_cuda.h> #include <optix_math.h> #include <optixu/optixu_matrix.h> #include <optixu/optixu_aabb.h> #include "CutSurfacePayloads.cu" #include "util.cu" #include <ElVis/Core/OptixVariables.cu> // Intersection program for cannonical cylinder of height 1 and centered // at the origin. We expect users to provide transformation nodes // to rotate and resize the cylinder as needed. RT_PROGRAM void CylinderIntersect( int primIdx ) { ElVisFloat3 d = MakeFloat3(ray.direction); ElVisFloat3 o = MakeFloat3(ray.origin); ElVisFloat A = d.x*d.x + d.y*d.y; ElVisFloat B = MAKE_FLOAT(2.0)*(o.x*d.x + o.y*d.y); ElVisFloat C = o.x*o.x + o.y*o.y - MAKE_FLOAT(1.0); ElVisFloat D = B*B - MAKE_FLOAT(4.0)*A*C; if( D < MAKE_FLOAT(0.0) ) { return; } // In this case we know that there is at least 1 intersection. ElVisFloat denom = MAKE_FLOAT(2.0) * A; ElVisFloat square_D = Sqrtf(D); // Of the two roots, this is the one which is closest to the viewer. ElVisFloat t1 = (-B - square_D)/denom; if( t1 > MAKE_FLOAT(0.0) ) { const ElVisFloat3 intersectionPoint = o + t1 * d; if( intersectionPoint.z >= MAKE_FLOAT(0.0) && intersectionPoint.z <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t1 ) ) { normal = MakeFloat3(intersectionPoint.x, intersectionPoint.y, MAKE_FLOAT(0.0)); normalize(normal); rtReportIntersection(0); } } } // TODO - Uncommenting the rest of this methods causes failure. On 1/18/2011 I postponed this so I could finish // some timing tests, but it needs to be addressed. 
ElVisFloat t2 = (-B + square_D)/denom; if( t2 > MAKE_FLOAT(0.0) ) { const ElVisFloat3 intersectionPoint = o + t2 * d; if( intersectionPoint.z >= MAKE_FLOAT(0.0) && intersectionPoint.z <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t2 ) ) { // Uncomment the following line for error in Cuda 3.0 and Optix 2.0 and sm_20 // Cuda 3.0 Optix 2.0 sm_20 - x // Cuda 3.0 Optix 2.0 sm_13 - x // Cuda 3.0 Optix 2.1 sm_20 - Works // Cuda 3.0 Optix 2.1 sm_13 // Cuda 3.1 Optix 2.1 sm_20 // Cuda 3.1 Optix 2.1 sm_13 // Cuda 3.2 Optix 2.1 sm_20 // Cuda 3.2 Optix 2.1 sm_13 // normal = MakeFloat3(intersectionPoint.x, intersectionPoint.y, MAKE_FLOAT(0.0)); normalize(normal); rtReportIntersection(0); } } } ElVisFloat4 cap0 = MakeFloat4(MAKE_FLOAT(0.0), MAKE_FLOAT(0.0), MAKE_FLOAT(-1.0), MAKE_FLOAT(0.0)); ElVisFloat t3; if( FindPlaneIntersection(o, d, cap0, t3) ) { const ElVisFloat3 intersectionPoint = o + t3 * d; if( intersectionPoint.x*intersectionPoint.x + intersectionPoint.y*intersectionPoint.y <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t3 ) ) { normal = MakeFloat3(cap0.x, cap0.y, cap0.z); rtReportIntersection(0); } } } ElVisFloat4 cap1 = MakeFloat4(MAKE_FLOAT(0.0), MAKE_FLOAT(0.0), MAKE_FLOAT(1.0), MAKE_FLOAT(-1.0) ); ElVisFloat t4; if( FindPlaneIntersection(o, d, cap1, t4) ) { const ElVisFloat3 intersectionPoint = o + t4 * d; if( intersectionPoint.x*intersectionPoint.x + intersectionPoint.y*intersectionPoint.y <= MAKE_FLOAT(1.0) ) { if( rtPotentialIntersection( t4 ) ) { normal = MakeFloat3(cap1.x, cap1.y, cap1.z); rtReportIntersection(0); } } } } // Bounding box for the cannonical cylinder. RT_PROGRAM void CylinderBounding (int, float result[6]) { optix::Aabb* aabb = (optix::Aabb*)result; aabb->m_min = make_float3(-1.0f, -1.0f, -1.0f); aabb->m_max = make_float3(1.0f, 1.0f, 1.0f); } #endif
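In the cylinder program above, the side hit is the usual quadratic in t for x^2 + y^2 = 1 restricted to 0 <= z <= 1, and the caps are two plane tests clipped to the unit disc. A minimal plain C++ sketch of that reference test, useful for checking the OptiX program on the host; the function and its signature are assumptions, not ElVis API:

// Reference hit test for the canonical cylinder (radius 1, 0 <= z <= 1).
// Returns the nearest positive t along o + t*d, or -1.0 for a miss.
#include <cmath>
#include <cfloat>

double hitCanonicalCylinder(const double o[3], const double d[3]) {
    double best = DBL_MAX;
    double A = d[0] * d[0] + d[1] * d[1];
    double B = 2.0 * (o[0] * d[0] + o[1] * d[1]);
    double C = o[0] * o[0] + o[1] * o[1] - 1.0;
    double D = B * B - 4.0 * A * C;
    if (A != 0.0 && D >= 0.0) {                       // side surface
        double sq = std::sqrt(D);
        double roots[2] = { (-B - sq) / (2.0 * A), (-B + sq) / (2.0 * A) };
        for (int i = 0; i < 2; ++i) {
            double t = roots[i];
            double z = o[2] + t * d[2];
            if (t > 0.0 && z >= 0.0 && z <= 1.0 && t < best) best = t;
        }
    }
    double caps[2] = { 0.0, 1.0 };                    // planes z = 0 and z = 1
    for (int i = 0; i < 2 && d[2] != 0.0; ++i) {
        double t = (caps[i] - o[2]) / d[2];
        double x = o[0] + t * d[0], y = o[1] + t * d[1];
        if (t > 0.0 && x * x + y * y <= 1.0 && t < best) best = t;
    }
    return best == DBL_MAX ? -1.0 : best;
}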
7b4b9b598880b8fd3f8c62c29268b138648a514a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2019 XGBoost contributors */ #include <xgboost/data.h> #include "./ellpack_page.cuh" #include "../common/hist_util.h" #include "../common/random.h" namespace xgboost { EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {} EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param) : impl_{new EllpackPageImpl(dmat, param)} {} EllpackPage::~EllpackPage() = default; size_t EllpackPage::Size() const { return impl_->Size(); } void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); } // Bin each input data entry, store the bin indices in compressed form. __global__ void CompressBinEllpackKernel( common::CompressedBufferWriter wr, common::CompressedByteT* __restrict__ buffer, // gidx_buffer const size_t* __restrict__ row_ptrs, // row offset of input data const Entry* __restrict__ entries, // One batch of input data const float* __restrict__ cuts, // HistogramCuts::cut_values_ const uint32_t* __restrict__ cut_rows, // HistogramCuts::cut_ptrs_ size_t base_row, // batch_row_begin size_t n_rows, size_t row_stride, unsigned int null_gidx_value) { size_t irow = threadIdx.x + blockIdx.x * blockDim.x; int ifeature = threadIdx.y + blockIdx.y * blockDim.y; if (irow >= n_rows || ifeature >= row_stride) { return; } int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]); unsigned int bin = null_gidx_value; if (ifeature < row_length) { Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature]; int feature = entry.index; float fvalue = entry.fvalue; // {feature_cuts, ncuts} forms the array of cuts of `feature'. const float* feature_cuts = &cuts[cut_rows[feature]]; int ncuts = cut_rows[feature + 1] - cut_rows[feature]; // Assigning the bin in current entry. // S.t.: fvalue < feature_cuts[bin] bin = dh::UpperBound(feature_cuts, ncuts, fvalue); if (bin >= ncuts) { bin = ncuts - 1; } // Add the number of bins in previous features. bin += cut_rows[feature]; } // Write to gidx buffer. wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature); } // Construct an ELLPACK matrix with the given number of empty rows. EllpackPageImpl::EllpackPageImpl(int device, EllpackInfo info, size_t n_rows) { monitor_.Init("ellpack_page"); dh::safe_cuda(hipSetDevice(device)); matrix.info = info; matrix.base_rowid = 0; matrix.n_rows = n_rows; monitor_.StartCuda("InitCompressedData"); InitCompressedData(device, n_rows); monitor_.StopCuda("InitCompressedData"); } // Construct an ELLPACK matrix in memory. EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param) { monitor_.Init("ellpack_page"); dh::safe_cuda(hipSetDevice(param.gpu_id)); matrix.n_rows = dmat->Info().num_row_; monitor_.StartCuda("Quantiles"); // Create the quantile sketches for the dmatrix and initialize HistogramCuts. 
common::HistogramCuts hmat; size_t row_stride = common::DeviceSketch(param.gpu_id, param.max_bin, param.gpu_batch_nrows, dmat, &hmat); monitor_.StopCuda("Quantiles"); monitor_.StartCuda("InitEllpackInfo"); InitInfo(param.gpu_id, dmat->IsDense(), row_stride, hmat); monitor_.StopCuda("InitEllpackInfo"); monitor_.StartCuda("InitCompressedData"); InitCompressedData(param.gpu_id, dmat->Info().num_row_); monitor_.StopCuda("InitCompressedData"); monitor_.StartCuda("BinningCompression"); DeviceHistogramBuilderState hist_builder_row_state(dmat->Info().num_row_); for (const auto& batch : dmat->GetBatches<SparsePage>()) { hist_builder_row_state.BeginBatch(batch); CreateHistIndices(param.gpu_id, batch, hist_builder_row_state.GetRowStateOnDevice()); hist_builder_row_state.EndBatch(); } monitor_.StopCuda("BinningCompression"); } // A functor that copies the data from one EllpackPage to another. struct CopyPage { common::CompressedBufferWriter cbw; common::CompressedByteT* dst_data_d; common::CompressedIterator<uint32_t> src_iterator_d; // The number of elements to skip. size_t offset; CopyPage(EllpackPageImpl* dst, EllpackPageImpl* src, size_t offset) : cbw{dst->matrix.info.NumSymbols()}, dst_data_d{dst->gidx_buffer.data()}, src_iterator_d{src->gidx_buffer.data(), src->matrix.info.NumSymbols()}, offset(offset) {} __device__ void operator()(size_t element_id) { cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id], element_id + offset); } }; // Copy the data from the given EllpackPage to the current page. size_t EllpackPageImpl::Copy(int device, EllpackPageImpl* page, size_t offset) { monitor_.StartCuda("Copy"); size_t num_elements = page->matrix.n_rows * page->matrix.info.row_stride; CHECK_EQ(matrix.info.row_stride, page->matrix.info.row_stride); CHECK_EQ(matrix.info.NumSymbols(), page->matrix.info.NumSymbols()); CHECK_GE(matrix.n_rows * matrix.info.row_stride, offset + num_elements); dh::LaunchN(device, num_elements, CopyPage(this, page, offset)); monitor_.StopCuda("Copy"); return num_elements; } // A functor that compacts the rows from one EllpackPage into another. struct CompactPage { common::CompressedBufferWriter cbw; common::CompressedByteT* dst_data_d; common::CompressedIterator<uint32_t> src_iterator_d; /*! \brief An array that maps the rows from the full DMatrix to the compacted page. * * The total size is the number of rows in the original, uncompacted DMatrix. Elements are the * row ids in the compacted page. Rows not needed are set to SIZE_MAX. * * An example compacting 16 rows to 8 rows: * [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, * SIZE_MAX] */ common::Span<size_t> row_indexes; size_t base_rowid; size_t row_stride; CompactPage(EllpackPageImpl* dst, EllpackPageImpl* src, common::Span<size_t> row_indexes) : cbw{dst->matrix.info.NumSymbols()}, dst_data_d{dst->gidx_buffer.data()}, src_iterator_d{src->gidx_buffer.data(), src->matrix.info.NumSymbols()}, row_indexes(row_indexes), base_rowid{src->matrix.base_rowid}, row_stride{src->matrix.info.row_stride} {} __device__ void operator()(size_t row_id) { size_t src_row = base_rowid + row_id; size_t dst_row = row_indexes[src_row]; if (dst_row == SIZE_MAX) return; size_t dst_offset = dst_row * row_stride; size_t src_offset = row_id * row_stride; for (size_t j = 0; j < row_stride; j++) { cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j], dst_offset + j); } } }; // Compacts the data from the given EllpackPage into the current page. 
void EllpackPageImpl::Compact(int device, EllpackPageImpl* page, common::Span<size_t> row_indexes) { monitor_.StartCuda("Compact"); CHECK_EQ(matrix.info.row_stride, page->matrix.info.row_stride); CHECK_EQ(matrix.info.NumSymbols(), page->matrix.info.NumSymbols()); CHECK_LE(page->matrix.base_rowid + page->matrix.n_rows, row_indexes.size()); dh::LaunchN(device, page->matrix.n_rows, CompactPage(this, page, row_indexes)); monitor_.StopCuda("Compact"); } // Construct an EllpackInfo based on histogram cuts of features. EllpackInfo::EllpackInfo(int device, bool is_dense, size_t row_stride, const common::HistogramCuts& hmat, dh::BulkAllocator* ba) : is_dense(is_dense), row_stride(row_stride), n_bins(hmat.Ptrs().back()) { ba->Allocate(device, &feature_segments, hmat.Ptrs().size(), &gidx_fvalue_map, hmat.Values().size(), &min_fvalue, hmat.MinValues().size()); dh::CopyVectorToDeviceSpan(gidx_fvalue_map, hmat.Values()); dh::CopyVectorToDeviceSpan(min_fvalue, hmat.MinValues()); dh::CopyVectorToDeviceSpan(feature_segments, hmat.Ptrs()); } // Initialize the EllpackInfo for this page. void EllpackPageImpl::InitInfo(int device, bool is_dense, size_t row_stride, const common::HistogramCuts& hmat) { matrix.info = EllpackInfo(device, is_dense, row_stride, hmat, &ba_); } // Initialize the buffer to stored compressed features. void EllpackPageImpl::InitCompressedData(int device, size_t num_rows) { size_t num_symbols = matrix.info.NumSymbols(); // Required buffer size for storing data matrix in ELLPack format. size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( matrix.info.row_stride * num_rows, num_symbols); ba_.Allocate(device, &gidx_buffer, compressed_size_bytes); thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0); matrix.gidx_iter = common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols); } // Compress a CSR page into ELLPACK. void EllpackPageImpl::CreateHistIndices(int device, const SparsePage& row_batch, const RowStateOnDevice& device_row_state) { // Has any been allocated for me in this batch? if (!device_row_state.rows_to_process_from_batch) return; unsigned int null_gidx_value = matrix.info.n_bins; size_t row_stride = matrix.info.row_stride; const auto& offset_vec = row_batch.offset.ConstHostVector(); // bin and compress entries in batches of rows size_t gpu_batch_nrows = ::min( dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)), static_cast<size_t>(device_row_state.rows_to_process_from_batch)); const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector(); size_t gpu_nbatches = common::DivRoundUp(device_row_state.rows_to_process_from_batch, gpu_batch_nrows); for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) { size_t batch_row_begin = gpu_batch * gpu_batch_nrows; size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows; if (batch_row_end > device_row_state.rows_to_process_from_batch) { batch_row_end = device_row_state.rows_to_process_from_batch; } size_t batch_nrows = batch_row_end - batch_row_begin; const auto ent_cnt_begin = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_begin]; const auto ent_cnt_end = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_end]; /*! \brief row offset in SparsePage (the input data). 
*/ dh::device_vector<size_t> row_ptrs(batch_nrows + 1); thrust::copy( offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_begin, offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_end + 1, row_ptrs.begin()); // number of entries in this batch. size_t n_entries = ent_cnt_end - ent_cnt_begin; dh::device_vector<Entry> entries_d(n_entries); // copy data entries to device. dh::safe_cuda(hipMemcpy(entries_d.data().get(), data_vec.data() + ent_cnt_begin, n_entries * sizeof(Entry), hipMemcpyDefault)); const dim3 block3(32, 8, 1); // 256 threads const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x), common::DivRoundUp(row_stride, block3.y), 1); dh::LaunchKernel {grid3, block3} ( CompressBinEllpackKernel, common::CompressedBufferWriter(matrix.info.NumSymbols()), gidx_buffer.data(), row_ptrs.data().get(), entries_d.data().get(), matrix.info.gidx_fvalue_map.data(), matrix.info.feature_segments.data(), device_row_state.total_rows_processed + batch_row_begin, batch_nrows, row_stride, null_gidx_value); } } // Return the number of rows contained in this page. size_t EllpackPageImpl::Size() const { return matrix.n_rows; } // Clear the current page. void EllpackPageImpl::Clear() { ba_.Clear(); gidx_buffer = {}; idx_buffer.clear(); sparse_page_.Clear(); matrix.base_rowid = 0; matrix.n_rows = 0; device_initialized_ = false; } // Push a CSR page to the current page. // // The CSR pages are accumulated in memory until they reach a certain size, then written out as // compressed ELLPACK. void EllpackPageImpl::Push(int device, const SparsePage& batch) { sparse_page_.Push(batch); matrix.n_rows += batch.Size(); } // Compress the accumulated SparsePage. void EllpackPageImpl::CompressSparsePage(int device) { monitor_.StartCuda("InitCompressedData"); InitCompressedData(device, matrix.n_rows); monitor_.StopCuda("InitCompressedData"); monitor_.StartCuda("BinningCompression"); DeviceHistogramBuilderState hist_builder_row_state(matrix.n_rows); hist_builder_row_state.BeginBatch(sparse_page_); CreateHistIndices(device, sparse_page_, hist_builder_row_state.GetRowStateOnDevice()); hist_builder_row_state.EndBatch(); monitor_.StopCuda("BinningCompression"); monitor_.StartCuda("CopyDeviceToHost"); idx_buffer.resize(gidx_buffer.size()); dh::CopyDeviceSpanToVector(&idx_buffer, gidx_buffer); ba_.Clear(); gidx_buffer = {}; monitor_.StopCuda("CopyDeviceToHost"); } // Return the memory cost for storing the compressed features. size_t EllpackPageImpl::MemCostBytes() const { // Required buffer size for storing data matrix in ELLPack format. size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( matrix.info.row_stride * matrix.n_rows, matrix.info.NumSymbols()); return compressed_size_bytes; } // Copy the compressed features to GPU. void EllpackPageImpl::InitDevice(int device, EllpackInfo info) { if (device_initialized_) return; monitor_.StartCuda("CopyPageToDevice"); dh::safe_cuda(hipSetDevice(device)); gidx_buffer = {}; ba_.Allocate(device, &gidx_buffer, idx_buffer.size()); dh::CopyVectorToDeviceSpan(gidx_buffer, idx_buffer); matrix.info = info; matrix.gidx_iter = common::CompressedIterator<uint32_t>(gidx_buffer.data(), info.n_bins + 1); monitor_.StopCuda("CopyPageToDevice"); device_initialized_ = true; } } // namespace xgboost
7b4b9b598880b8fd3f8c62c29268b138648a514a.cu
/*! * Copyright 2019 XGBoost contributors */ #include <xgboost/data.h> #include "./ellpack_page.cuh" #include "../common/hist_util.h" #include "../common/random.h" namespace xgboost { EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {} EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param) : impl_{new EllpackPageImpl(dmat, param)} {} EllpackPage::~EllpackPage() = default; size_t EllpackPage::Size() const { return impl_->Size(); } void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); } // Bin each input data entry, store the bin indices in compressed form. __global__ void CompressBinEllpackKernel( common::CompressedBufferWriter wr, common::CompressedByteT* __restrict__ buffer, // gidx_buffer const size_t* __restrict__ row_ptrs, // row offset of input data const Entry* __restrict__ entries, // One batch of input data const float* __restrict__ cuts, // HistogramCuts::cut_values_ const uint32_t* __restrict__ cut_rows, // HistogramCuts::cut_ptrs_ size_t base_row, // batch_row_begin size_t n_rows, size_t row_stride, unsigned int null_gidx_value) { size_t irow = threadIdx.x + blockIdx.x * blockDim.x; int ifeature = threadIdx.y + blockIdx.y * blockDim.y; if (irow >= n_rows || ifeature >= row_stride) { return; } int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]); unsigned int bin = null_gidx_value; if (ifeature < row_length) { Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature]; int feature = entry.index; float fvalue = entry.fvalue; // {feature_cuts, ncuts} forms the array of cuts of `feature'. const float* feature_cuts = &cuts[cut_rows[feature]]; int ncuts = cut_rows[feature + 1] - cut_rows[feature]; // Assigning the bin in current entry. // S.t.: fvalue < feature_cuts[bin] bin = dh::UpperBound(feature_cuts, ncuts, fvalue); if (bin >= ncuts) { bin = ncuts - 1; } // Add the number of bins in previous features. bin += cut_rows[feature]; } // Write to gidx buffer. wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature); } // Construct an ELLPACK matrix with the given number of empty rows. EllpackPageImpl::EllpackPageImpl(int device, EllpackInfo info, size_t n_rows) { monitor_.Init("ellpack_page"); dh::safe_cuda(cudaSetDevice(device)); matrix.info = info; matrix.base_rowid = 0; matrix.n_rows = n_rows; monitor_.StartCuda("InitCompressedData"); InitCompressedData(device, n_rows); monitor_.StopCuda("InitCompressedData"); } // Construct an ELLPACK matrix in memory. EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param) { monitor_.Init("ellpack_page"); dh::safe_cuda(cudaSetDevice(param.gpu_id)); matrix.n_rows = dmat->Info().num_row_; monitor_.StartCuda("Quantiles"); // Create the quantile sketches for the dmatrix and initialize HistogramCuts. 
common::HistogramCuts hmat; size_t row_stride = common::DeviceSketch(param.gpu_id, param.max_bin, param.gpu_batch_nrows, dmat, &hmat); monitor_.StopCuda("Quantiles"); monitor_.StartCuda("InitEllpackInfo"); InitInfo(param.gpu_id, dmat->IsDense(), row_stride, hmat); monitor_.StopCuda("InitEllpackInfo"); monitor_.StartCuda("InitCompressedData"); InitCompressedData(param.gpu_id, dmat->Info().num_row_); monitor_.StopCuda("InitCompressedData"); monitor_.StartCuda("BinningCompression"); DeviceHistogramBuilderState hist_builder_row_state(dmat->Info().num_row_); for (const auto& batch : dmat->GetBatches<SparsePage>()) { hist_builder_row_state.BeginBatch(batch); CreateHistIndices(param.gpu_id, batch, hist_builder_row_state.GetRowStateOnDevice()); hist_builder_row_state.EndBatch(); } monitor_.StopCuda("BinningCompression"); } // A functor that copies the data from one EllpackPage to another. struct CopyPage { common::CompressedBufferWriter cbw; common::CompressedByteT* dst_data_d; common::CompressedIterator<uint32_t> src_iterator_d; // The number of elements to skip. size_t offset; CopyPage(EllpackPageImpl* dst, EllpackPageImpl* src, size_t offset) : cbw{dst->matrix.info.NumSymbols()}, dst_data_d{dst->gidx_buffer.data()}, src_iterator_d{src->gidx_buffer.data(), src->matrix.info.NumSymbols()}, offset(offset) {} __device__ void operator()(size_t element_id) { cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id], element_id + offset); } }; // Copy the data from the given EllpackPage to the current page. size_t EllpackPageImpl::Copy(int device, EllpackPageImpl* page, size_t offset) { monitor_.StartCuda("Copy"); size_t num_elements = page->matrix.n_rows * page->matrix.info.row_stride; CHECK_EQ(matrix.info.row_stride, page->matrix.info.row_stride); CHECK_EQ(matrix.info.NumSymbols(), page->matrix.info.NumSymbols()); CHECK_GE(matrix.n_rows * matrix.info.row_stride, offset + num_elements); dh::LaunchN(device, num_elements, CopyPage(this, page, offset)); monitor_.StopCuda("Copy"); return num_elements; } // A functor that compacts the rows from one EllpackPage into another. struct CompactPage { common::CompressedBufferWriter cbw; common::CompressedByteT* dst_data_d; common::CompressedIterator<uint32_t> src_iterator_d; /*! \brief An array that maps the rows from the full DMatrix to the compacted page. * * The total size is the number of rows in the original, uncompacted DMatrix. Elements are the * row ids in the compacted page. Rows not needed are set to SIZE_MAX. * * An example compacting 16 rows to 8 rows: * [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, * SIZE_MAX] */ common::Span<size_t> row_indexes; size_t base_rowid; size_t row_stride; CompactPage(EllpackPageImpl* dst, EllpackPageImpl* src, common::Span<size_t> row_indexes) : cbw{dst->matrix.info.NumSymbols()}, dst_data_d{dst->gidx_buffer.data()}, src_iterator_d{src->gidx_buffer.data(), src->matrix.info.NumSymbols()}, row_indexes(row_indexes), base_rowid{src->matrix.base_rowid}, row_stride{src->matrix.info.row_stride} {} __device__ void operator()(size_t row_id) { size_t src_row = base_rowid + row_id; size_t dst_row = row_indexes[src_row]; if (dst_row == SIZE_MAX) return; size_t dst_offset = dst_row * row_stride; size_t src_offset = row_id * row_stride; for (size_t j = 0; j < row_stride; j++) { cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j], dst_offset + j); } } }; // Compacts the data from the given EllpackPage into the current page. 
void EllpackPageImpl::Compact(int device, EllpackPageImpl* page, common::Span<size_t> row_indexes) { monitor_.StartCuda("Compact"); CHECK_EQ(matrix.info.row_stride, page->matrix.info.row_stride); CHECK_EQ(matrix.info.NumSymbols(), page->matrix.info.NumSymbols()); CHECK_LE(page->matrix.base_rowid + page->matrix.n_rows, row_indexes.size()); dh::LaunchN(device, page->matrix.n_rows, CompactPage(this, page, row_indexes)); monitor_.StopCuda("Compact"); } // Construct an EllpackInfo based on histogram cuts of features. EllpackInfo::EllpackInfo(int device, bool is_dense, size_t row_stride, const common::HistogramCuts& hmat, dh::BulkAllocator* ba) : is_dense(is_dense), row_stride(row_stride), n_bins(hmat.Ptrs().back()) { ba->Allocate(device, &feature_segments, hmat.Ptrs().size(), &gidx_fvalue_map, hmat.Values().size(), &min_fvalue, hmat.MinValues().size()); dh::CopyVectorToDeviceSpan(gidx_fvalue_map, hmat.Values()); dh::CopyVectorToDeviceSpan(min_fvalue, hmat.MinValues()); dh::CopyVectorToDeviceSpan(feature_segments, hmat.Ptrs()); } // Initialize the EllpackInfo for this page. void EllpackPageImpl::InitInfo(int device, bool is_dense, size_t row_stride, const common::HistogramCuts& hmat) { matrix.info = EllpackInfo(device, is_dense, row_stride, hmat, &ba_); } // Initialize the buffer to stored compressed features. void EllpackPageImpl::InitCompressedData(int device, size_t num_rows) { size_t num_symbols = matrix.info.NumSymbols(); // Required buffer size for storing data matrix in ELLPack format. size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( matrix.info.row_stride * num_rows, num_symbols); ba_.Allocate(device, &gidx_buffer, compressed_size_bytes); thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0); matrix.gidx_iter = common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols); } // Compress a CSR page into ELLPACK. void EllpackPageImpl::CreateHistIndices(int device, const SparsePage& row_batch, const RowStateOnDevice& device_row_state) { // Has any been allocated for me in this batch? if (!device_row_state.rows_to_process_from_batch) return; unsigned int null_gidx_value = matrix.info.n_bins; size_t row_stride = matrix.info.row_stride; const auto& offset_vec = row_batch.offset.ConstHostVector(); // bin and compress entries in batches of rows size_t gpu_batch_nrows = std::min( dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)), static_cast<size_t>(device_row_state.rows_to_process_from_batch)); const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector(); size_t gpu_nbatches = common::DivRoundUp(device_row_state.rows_to_process_from_batch, gpu_batch_nrows); for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) { size_t batch_row_begin = gpu_batch * gpu_batch_nrows; size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows; if (batch_row_end > device_row_state.rows_to_process_from_batch) { batch_row_end = device_row_state.rows_to_process_from_batch; } size_t batch_nrows = batch_row_end - batch_row_begin; const auto ent_cnt_begin = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_begin]; const auto ent_cnt_end = offset_vec[device_row_state.row_offset_in_current_batch + batch_row_end]; /*! \brief row offset in SparsePage (the input data). 
*/ dh::device_vector<size_t> row_ptrs(batch_nrows + 1); thrust::copy( offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_begin, offset_vec.data() + device_row_state.row_offset_in_current_batch + batch_row_end + 1, row_ptrs.begin()); // number of entries in this batch. size_t n_entries = ent_cnt_end - ent_cnt_begin; dh::device_vector<Entry> entries_d(n_entries); // copy data entries to device. dh::safe_cuda(cudaMemcpy(entries_d.data().get(), data_vec.data() + ent_cnt_begin, n_entries * sizeof(Entry), cudaMemcpyDefault)); const dim3 block3(32, 8, 1); // 256 threads const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x), common::DivRoundUp(row_stride, block3.y), 1); dh::LaunchKernel {grid3, block3} ( CompressBinEllpackKernel, common::CompressedBufferWriter(matrix.info.NumSymbols()), gidx_buffer.data(), row_ptrs.data().get(), entries_d.data().get(), matrix.info.gidx_fvalue_map.data(), matrix.info.feature_segments.data(), device_row_state.total_rows_processed + batch_row_begin, batch_nrows, row_stride, null_gidx_value); } } // Return the number of rows contained in this page. size_t EllpackPageImpl::Size() const { return matrix.n_rows; } // Clear the current page. void EllpackPageImpl::Clear() { ba_.Clear(); gidx_buffer = {}; idx_buffer.clear(); sparse_page_.Clear(); matrix.base_rowid = 0; matrix.n_rows = 0; device_initialized_ = false; } // Push a CSR page to the current page. // // The CSR pages are accumulated in memory until they reach a certain size, then written out as // compressed ELLPACK. void EllpackPageImpl::Push(int device, const SparsePage& batch) { sparse_page_.Push(batch); matrix.n_rows += batch.Size(); } // Compress the accumulated SparsePage. void EllpackPageImpl::CompressSparsePage(int device) { monitor_.StartCuda("InitCompressedData"); InitCompressedData(device, matrix.n_rows); monitor_.StopCuda("InitCompressedData"); monitor_.StartCuda("BinningCompression"); DeviceHistogramBuilderState hist_builder_row_state(matrix.n_rows); hist_builder_row_state.BeginBatch(sparse_page_); CreateHistIndices(device, sparse_page_, hist_builder_row_state.GetRowStateOnDevice()); hist_builder_row_state.EndBatch(); monitor_.StopCuda("BinningCompression"); monitor_.StartCuda("CopyDeviceToHost"); idx_buffer.resize(gidx_buffer.size()); dh::CopyDeviceSpanToVector(&idx_buffer, gidx_buffer); ba_.Clear(); gidx_buffer = {}; monitor_.StopCuda("CopyDeviceToHost"); } // Return the memory cost for storing the compressed features. size_t EllpackPageImpl::MemCostBytes() const { // Required buffer size for storing data matrix in ELLPack format. size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( matrix.info.row_stride * matrix.n_rows, matrix.info.NumSymbols()); return compressed_size_bytes; } // Copy the compressed features to GPU. void EllpackPageImpl::InitDevice(int device, EllpackInfo info) { if (device_initialized_) return; monitor_.StartCuda("CopyPageToDevice"); dh::safe_cuda(cudaSetDevice(device)); gidx_buffer = {}; ba_.Allocate(device, &gidx_buffer, idx_buffer.size()); dh::CopyVectorToDeviceSpan(gidx_buffer, idx_buffer); matrix.info = info; matrix.gidx_iter = common::CompressedIterator<uint32_t>(gidx_buffer.data(), info.n_bins + 1); monitor_.StopCuda("CopyPageToDevice"); device_initialized_ = true; } } // namespace xgboost
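The binning rule in CompressBinEllpackKernel above reduces to an upper-bound search within one feature's slice of the cut values plus a global offset. A minimal host sketch of that rule, assuming dh::UpperBound behaves like std::upper_bound as the in-kernel comment ("fvalue < feature_cuts[bin]") suggests; the helper name is illustrative:

// Global bin for one (feature, fvalue) entry: first cut strictly greater than
// fvalue within the feature's slice, clamped to the last cut, then offset by
// the number of cuts of all previous features.
#include <algorithm>
#include <cstdint>
#include <vector>

uint32_t global_bin(const std::vector<float>& cut_values,
                    const std::vector<uint32_t>& cut_ptrs,
                    int feature, float fvalue) {
    uint32_t begin = cut_ptrs[feature];
    uint32_t end = cut_ptrs[feature + 1];
    uint32_t ncuts = end - begin;
    uint32_t bin = static_cast<uint32_t>(
        std::upper_bound(cut_values.begin() + begin,
                         cut_values.begin() + end, fvalue) -
        (cut_values.begin() + begin));
    if (bin >= ncuts) bin = ncuts - 1;   // clamp, as the kernel does
    return bin + begin;                  // add bins of all previous features
}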
786c96e5671417acea3c4bc6cb4e58502c932fe6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cfloat> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "mymath.cuh" #include "sphere_hip.cuh" #include "ray.cuh" extern "C" hipError_t InitCuda(int w, int h, unsigned char** dev_bitmap); extern "C" hipError_t CalculateCuda(int w, int h, unsigned char* dev_bitmap, unsigned char* host_bitmap); extern "C" void DeinitCuda(unsigned char* dev_bitmap); __device__ Vec3 color(Ray &r, Sphere *dev_sp) { hit_record rec; if (dev_sp->hit(r, 0.001, DBL_MAX, rec)) { return 0.5*Vec3(rec.normal.x + 1, rec.normal.y + 1, rec.normal.z + 1); } double t = 0.5*(r.direction.y + 1.0); return Vec3(1.0, 1.0, 1.0)*(1.0 - t) + Vec3(0.5, 0.7, 1.0)*t; } __global__ void RayKernel(int w, int h, unsigned char* dev_bitmap,Sphere *dev_sp) { Vec3 v1(1, 1, 1); Vec3 v2(0, 0, 0); int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; if (x<w && y<h) { int offset = x + y*w; double u, v; u = double(x) / double(w); v = double(y) / double(h); Vec3 lower_left_corner(-2.0, -1.5, -1.0); Vec3 horizontal(4.0, 0.0, 0.0); Vec3 vertical(0.0, 3.0, 0.0); Vec3 origin(0.0, 0.0, 0.0); Ray r(origin, lower_left_corner + horizontal*u + vertical*v); Vec3 pixel = color(r, dev_sp); dev_bitmap[offset * 4] = int(255.99*pixel.r); dev_bitmap[offset * 4 + 1] = int(255.99*pixel.g); dev_bitmap[offset * 4 + 2] = int(255.99*pixel.b); dev_bitmap[offset * 4 + 3] = 1; } } hipError_t CalculateCuda(int w, int h, unsigned char* dev_bitmap, unsigned char* host_bitmap) { hipError_t cudaStatus; int image_size = w * h * 4; Sphere *sp1 = (Sphere *)malloc(sizeof(Sphere)); sp1->radius = 0.5; sp1->center.x = 0; sp1->center.y = 0; sp1->center.z = -1; Sphere *dev_sp=nullptr; hipMalloc((void**)&dev_sp, sizeof(Sphere)); hipMemcpy(dev_sp, sp1, sizeof(Sphere), hipMemcpyHostToDevice); // Launch a kernel on the GPU with one thread for each element. dim3 grids((w+31)/32, (h+31)/32); dim3 threads(32, 32); RayKernel << <grids, threads >> >(w, h,dev_bitmap, dev_sp); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(host_bitmap, dev_bitmap, image_size, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } hipFree(dev_sp); free(sp1); return cudaStatus; } hipError_t InitCuda(int w, int h, unsigned char** dev_bitmap) { hipError_t cudaStatus; int image_size = w * h * 4; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); } // Allocate GPU buffers for three vectors (two input, one output) cudaStatus = hipMalloc((void**)dev_bitmap, image_size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } return cudaStatus; } void DeinitCuda(unsigned char* dev_bitmap) { hipFree(dev_bitmap); }
786c96e5671417acea3c4bc6cb4e58502c932fe6.cu
#include <stdio.h> #include <stdlib.h> #include <cfloat> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "mymath.cuh" #include "sphere.cuh" #include "ray.cuh" extern "C" cudaError_t InitCuda(int w, int h, unsigned char** dev_bitmap); extern "C" cudaError_t CalculateCuda(int w, int h, unsigned char* dev_bitmap, unsigned char* host_bitmap); extern "C" void DeinitCuda(unsigned char* dev_bitmap); __device__ Vec3 color(Ray &r, Sphere *dev_sp) { hit_record rec; if (dev_sp->hit(r, 0.001, DBL_MAX, rec)) { return 0.5*Vec3(rec.normal.x + 1, rec.normal.y + 1, rec.normal.z + 1); } double t = 0.5*(r.direction.y + 1.0); return Vec3(1.0, 1.0, 1.0)*(1.0 - t) + Vec3(0.5, 0.7, 1.0)*t; } __global__ void RayKernel(int w, int h, unsigned char* dev_bitmap,Sphere *dev_sp) { Vec3 v1(1, 1, 1); Vec3 v2(0, 0, 0); int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; if (x<w && y<h) { int offset = x + y*w; double u, v; u = double(x) / double(w); v = double(y) / double(h); Vec3 lower_left_corner(-2.0, -1.5, -1.0); Vec3 horizontal(4.0, 0.0, 0.0); Vec3 vertical(0.0, 3.0, 0.0); Vec3 origin(0.0, 0.0, 0.0); Ray r(origin, lower_left_corner + horizontal*u + vertical*v); Vec3 pixel = color(r, dev_sp); dev_bitmap[offset * 4] = int(255.99*pixel.r); dev_bitmap[offset * 4 + 1] = int(255.99*pixel.g); dev_bitmap[offset * 4 + 2] = int(255.99*pixel.b); dev_bitmap[offset * 4 + 3] = 1; } } cudaError_t CalculateCuda(int w, int h, unsigned char* dev_bitmap, unsigned char* host_bitmap) { cudaError_t cudaStatus; int image_size = w * h * 4; Sphere *sp1 = (Sphere *)malloc(sizeof(Sphere)); sp1->radius = 0.5; sp1->center.x = 0; sp1->center.y = 0; sp1->center.z = -1; Sphere *dev_sp=nullptr; cudaMalloc((void**)&dev_sp, sizeof(Sphere)); cudaMemcpy(dev_sp, sp1, sizeof(Sphere), cudaMemcpyHostToDevice); // Launch a kernel on the GPU with one thread for each element. dim3 grids((w+31)/32, (h+31)/32); dim3 threads(32, 32); RayKernel << <grids, threads >> >(w, h,dev_bitmap, dev_sp); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(host_bitmap, dev_bitmap, image_size, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaFree(dev_sp); free(sp1); return cudaStatus; } cudaError_t InitCuda(int w, int h, unsigned char** dev_bitmap) { cudaError_t cudaStatus; int image_size = w * h * 4; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); } // Allocate GPU buffers for three vectors (two input, one output) cudaStatus = cudaMalloc((void**)dev_bitmap, image_size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } return cudaStatus; } void DeinitCuda(unsigned char* dev_bitmap) { cudaFree(dev_bitmap); }
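The launch geometry in CalculateCuda above covers every pixel by rounding each axis up to whole 32x32 blocks and relying on the x < w, y < h guard in RayKernel to drop the overhang. A minimal sketch of the two index helpers involved (helper names are illustrative, not part of the file):

// (a + b - 1) / b is the ceiling division behind grids((w+31)/32, (h+31)/32);
// offset = x + y*w indexes the pixel, and offset*4 its four RGBA bytes.
inline unsigned ceil_div(unsigned a, unsigned b) { return (a + b - 1) / b; }
inline int pixel_offset(int x, int y, int w) { return x + y * w; }

// e.g. dim3 grids(ceil_div(w, 32), ceil_div(h, 32)); dim3 threads(32, 32);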
e118f4c796b0de47e7bb620a2b8a134292897991.hip
// !!! This is a file automatically generated by hipify!!! #include "gpu_runtime.h" void convert_F_to_C_order(float *data, int m, int n) { /* convert an F order matrix to C order: sample data: 1, 2, 3, 4 5, 6, 7, 8 9, 10, 11, 12 output: 1, 4, 7, 10 2, 5, 8, 11 3, 6, 9, 12 */ size_t workspace_size = m*n; float *work_data = (float*)malloc(workspace_size*sizeof(float)); hipMemcpy(work_data, data, workspace_size*sizeof(float), hipMemcpyDeviceToHost); float *ans_data = (float*)malloc(workspace_size*sizeof(float)); for(int k=0; k<workspace_size; k++) { int i = k%m; int j = k/m; ans_data[i*n+j] = work_data[k]; } hipMemcpy(data, ans_data, workspace_size*sizeof(float), hipMemcpyHostToDevice); free(work_data); free(ans_data); return; } int CuSparse_DLGpuCsrmm(const DLArrayHandle data_handle, const DLArrayHandle row_handle, const DLArrayHandle col_handle, int nrow, int ncol, bool transposeA, const DLArrayHandle matB, bool transposeB, DLArrayHandle matC, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){ transposeB = ! transposeB; //cuSparse limit that A and B cannot transpose at the same time. /* using namespace std; std::cout << "transpose:" << transposeA << transposeB << std::endl; */ assert (!(transposeA == transposeB && transposeA == 1)); assert (data_handle->ndim == 1); /* using namespace std; std::cout << "Data_hadle_dim:" << data_handle->ndim << std::endl; std::cout << "Data_hadle_shape[0]:" << data_handle->shape[0] << std::endl; std::cout << "Row_hadle_dim:" << row_handle->ndim << std::endl; std::cout << "Row_hadle_shape[0]:" << row_handle->shape[0] << std::endl; std::cout << "Col_hadle_dim:" << col_handle->ndim << std::endl; std::cout << "Col_hadle_shape[0]:" << col_handle->shape[0] << std::endl; std::cout << "nrow:" << nrow << std::endl; std::cout << "ncol:" << ncol << std::endl; int workspace_size = matB->shape[0]*matB->shape[1]*sizeof(float); float *work_data = (float*)malloc(workspace_size); hipMemcpy(work_data, matB->data, workspace_size, hipMemcpyDeviceToHost); using namespace std; for (int i=0; i<workspace_size/sizeof(float); i++) std::cout<< "matB[i]=" << work_data[i] << std::endl; */ assert (row_handle->ndim == 1); assert (col_handle->ndim == 1); assert (matB->ndim == 2); assert (matC->ndim == 2); int m = nrow; int k = ncol; int n = matC->shape[1]; int nnz = data_handle->shape[0]; int dev_id = (data_handle->ctx).device_id; cusp_init(dev_id, stream_handle); float alpha = 1.0; float beta = 0.0; hipsparseMatDescr_t descr = 0; CUSP_CALL(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); hipsparseOperation_t transA = transposeA ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE; hipsparseOperation_t transB = transposeB ? 
HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE; int ldb = matB->shape[1]; int ldc = matC->shape[0]; CUSP_CALL(hipsparseScsrmm2(cusp_map[dev_id], transA, transB, m, n, k, nnz, (const float*)&alpha, descr, (const float*)data_handle->data, (const int*)row_handle->data, (const int*)col_handle->data, (const float*)matB->data, (int)ldb, (const float*)&beta, (float*)matC->data, (int)ldc)); convert_F_to_C_order((float*)matC->data, matC->shape[0], matC->shape[1]); if(p != NULL){ int size_input1 = 1, size_input2 = 1, size_input3 = 1, size_output = 1; for(int i = 0; i < data_handle -> ndim; i++) size_input1 *= data_handle -> shape[i]; for(int i = 0; i < row_handle -> ndim; i++) size_input2 *= row_handle -> shape[i]; for(int i = 0; i < col_handle -> ndim; i++) size_input3 *= col_handle -> shape[i]; for(int i = 0; i < matC -> ndim; i++) size_output *= matC -> shape[i]; p -> input_memory = 1.0 * (size_input1 + size_input2 + size_input3) * sizeof(float) / 1024 / 1024; p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024; p -> workspace_memory = 0; } return 0; }
e118f4c796b0de47e7bb620a2b8a134292897991.cu
#include "gpu_runtime.h"

void convert_F_to_C_order(float *data, int m, int n) {
    /*
    convert an F order matrix to C order:
    sample data:
    1, 2, 3, 4
    5, 6, 7, 8
    9, 10, 11, 12
    output:
    1, 4, 7, 10
    2, 5, 8, 11
    3, 6, 9, 12
    */
    size_t workspace_size = m * n;
    float *work_data = (float*)malloc(workspace_size * sizeof(float));
    cudaMemcpy(work_data, data, workspace_size * sizeof(float), cudaMemcpyDeviceToHost);
    float *ans_data = (float*)malloc(workspace_size * sizeof(float));
    for (int k = 0; k < workspace_size; k++) {
        int i = k % m;
        int j = k / m;
        ans_data[i * n + j] = work_data[k];
    }
    cudaMemcpy(data, ans_data, workspace_size * sizeof(float), cudaMemcpyHostToDevice);
    free(work_data);
    free(ans_data);
    return;
}

int CuSparse_DLGpuCsrmm(const DLArrayHandle data_handle, const DLArrayHandle row_handle,
                        const DLArrayHandle col_handle, int nrow, int ncol, bool transposeA,
                        const DLArrayHandle matB, bool transposeB, DLArrayHandle matC,
                        DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL) {
    transposeB = !transposeB;  // cuSPARSE does not allow A and B to be transposed at the same time.
    /*
    using namespace std;
    std::cout << "transpose:" << transposeA << transposeB << std::endl;
    */
    assert(!(transposeA == transposeB && transposeA == 1));
    assert(data_handle->ndim == 1);
    /*
    using namespace std;
    std::cout << "Data_handle_dim:" << data_handle->ndim << std::endl;
    std::cout << "Data_handle_shape[0]:" << data_handle->shape[0] << std::endl;
    std::cout << "Row_handle_dim:" << row_handle->ndim << std::endl;
    std::cout << "Row_handle_shape[0]:" << row_handle->shape[0] << std::endl;
    std::cout << "Col_handle_dim:" << col_handle->ndim << std::endl;
    std::cout << "Col_handle_shape[0]:" << col_handle->shape[0] << std::endl;
    std::cout << "nrow:" << nrow << std::endl;
    std::cout << "ncol:" << ncol << std::endl;
    int workspace_size = matB->shape[0] * matB->shape[1] * sizeof(float);
    float *work_data = (float*)malloc(workspace_size);
    cudaMemcpy(work_data, matB->data, workspace_size, cudaMemcpyDeviceToHost);
    using namespace std;
    for (int i = 0; i < workspace_size / sizeof(float); i++)
        std::cout << "matB[i]=" << work_data[i] << std::endl;
    */
    assert(row_handle->ndim == 1);
    assert(col_handle->ndim == 1);
    assert(matB->ndim == 2);
    assert(matC->ndim == 2);

    int m = nrow;
    int k = ncol;
    int n = matC->shape[1];
    int nnz = data_handle->shape[0];
    int dev_id = (data_handle->ctx).device_id;
    cusp_init(dev_id, stream_handle);

    float alpha = 1.0;
    float beta = 0.0;

    cusparseMatDescr_t descr = 0;
    CUSP_CALL(cusparseCreateMatDescr(&descr));
    cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
    cusparseOperation_t transA =
        transposeA ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseOperation_t transB =
        transposeB ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;

    int ldb = matB->shape[1];
    int ldc = matC->shape[0];

    CUSP_CALL(cusparseScsrmm2(cusp_map[dev_id], transA, transB, m, n, k, nnz,
                              (const float*)&alpha, descr,
                              (const float*)data_handle->data,
                              (const int*)row_handle->data,
                              (const int*)col_handle->data,
                              (const float*)matB->data, (int)ldb,
                              (const float*)&beta,
                              (float*)matC->data, (int)ldc));
    convert_F_to_C_order((float*)matC->data, matC->shape[0], matC->shape[1]);

    if (p != NULL) {
        int size_input1 = 1, size_input2 = 1, size_input3 = 1, size_output = 1;
        for (int i = 0; i < data_handle->ndim; i++)
            size_input1 *= data_handle->shape[i];
        for (int i = 0; i < row_handle->ndim; i++)
            size_input2 *= row_handle->shape[i];
        for (int i = 0; i < col_handle->ndim; i++)
            size_input3 *= col_handle->shape[i];
        for (int i = 0; i < matC->ndim; i++)
            size_output *= matC->shape[i];
        p->input_memory = 1.0 * (size_input1 + size_input2 + size_input3) * sizeof(float) / 1024 / 1024;
        p->output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
        p->workspace_memory = 0;
    }
    return 0;
}
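The convert_F_to_C_order helper in the pair above is easiest to check by replaying its index arithmetic on the sample matrix from its own comment. The host-only sketch below is editorial material, not part of either file: the 3x4 sizes, the main() driver, and the printed check are assumptions made for the demo.

#include <cstdio>

int main() {
    const int m = 3, n = 4;
    // Raw buffer in F (column-major) order, which is how the routine above
    // expects matC to arrive: element (i, j) sits at f_order[j * m + i].
    float f_order[m * n] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    float c_order[m * n];
    // Same mapping as convert_F_to_C_order: linear index k -> (row k % m, col k / m).
    for (int k = 0; k < m * n; k++) {
        int i = k % m;
        int j = k / m;
        c_order[i * n + j] = f_order[k];
    }
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
            printf("%4.0f", c_order[i * n + j]);
        printf("\n");   // prints 1 4 7 10 / 2 5 8 11 / 3 6 9 12, matching the comment
    }
    return 0;
}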
3e6f88b5d264f9759a5676023107e3665cec29df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_minus_2_bot; int xdim0_update_halo_kernel2_yvel_minus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_minus_2_bot; int ydim0_update_halo_kernel2_yvel_minus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_minus_2_bot; int xdim1_update_halo_kernel2_yvel_minus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_minus_2_bot; int ydim1_update_halo_kernel2_yvel_minus_2_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_minus_2_bot * (y) + \ xdim0_update_halo_kernel2_yvel_minus_2_bot * \ ydim0_update_halo_kernel2_yvel_minus_2_bot * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_minus_2_bot * (y) + \ xdim1_update_halo_kernel2_yvel_minus_2_bot * \ ydim1_update_halo_kernel2_yvel_minus_2_bot * (z)) // user function __device__ inline void update_halo_kernel2_yvel_minus_2_bot_gpu(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, 2, 0)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, 2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_minus_2_bot( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_bot + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_bot * ydim0_update_halo_kernel2_yvel_minus_2_bot; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_bot + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_bot * ydim1_update_halo_kernel2_yvel_minus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_minus_2_bot_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_yvel_minus_2_bot(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 81)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(81, "update_halo_kernel2_yvel_minus_2_bot"); OPS_kernels[81].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = 
MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_yvel_minus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_yvel_minus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_yvel_minus_2_bot_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_2_bot, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_minus_2_bot_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_2_bot, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_minus_2_bot_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_2_bot, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_minus_2_bot_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_2_bot, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_minus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[81].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_2_bot), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[81].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[81].mpi_time += t2 - 
t1; OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
3e6f88b5d264f9759a5676023107e3665cec29df.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_minus_2_bot; int xdim0_update_halo_kernel2_yvel_minus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_minus_2_bot; int ydim0_update_halo_kernel2_yvel_minus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_minus_2_bot; int xdim1_update_halo_kernel2_yvel_minus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_minus_2_bot; int ydim1_update_halo_kernel2_yvel_minus_2_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_minus_2_bot * (y) + \ xdim0_update_halo_kernel2_yvel_minus_2_bot * \ ydim0_update_halo_kernel2_yvel_minus_2_bot * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_minus_2_bot * (y) + \ xdim1_update_halo_kernel2_yvel_minus_2_bot * \ ydim1_update_halo_kernel2_yvel_minus_2_bot * (z)) // user function __device__ inline void update_halo_kernel2_yvel_minus_2_bot_gpu(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, 2, 0)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, 2, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_minus_2_bot( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_bot + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_bot * ydim0_update_halo_kernel2_yvel_minus_2_bot; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_bot + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_bot * ydim1_update_halo_kernel2_yvel_minus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_minus_2_bot_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_yvel_minus_2_bot(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 81)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(81, "update_halo_kernel2_yvel_minus_2_bot"); OPS_kernels[81].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = 
args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_yvel_minus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_yvel_minus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_yvel_minus_2_bot_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_2_bot, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_minus_2_bot_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_2_bot, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_minus_2_bot_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_2_bot, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_minus_2_bot_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_2_bot, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_minus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[81].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_yvel_minus_2_bot<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[81].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[81].mpi_time += t2 - t1; OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[81].transfer 
+= ops_compute_transfer(dim, start, end, &arg1); } }
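Stripped of the OPS bookkeeping, the user kernel in this pair performs one reflective halo assignment per grid point: the bottom row (y = 0) of yvel0/yvel1 receives the negated value of the row two cells above it (y = 2). Below is a host-only sketch of that rule on a 2D slice, with made-up sizes and fill values and none of the OPS types, purely for illustration.

#include <cstdio>

int main() {
    const int xdim = 4, ydim = 5;                // hypothetical slab dimensions
    double yvel[xdim * ydim];
    for (int j = 0; j < ydim; j++)
        for (int i = 0; i < xdim; i++)
            yvel[i + xdim * j] = 10.0 * j + i;   // recognizable fill values

    // Mirrors yvel0[OPS_ACC0(0,0,0)] = -yvel0[OPS_ACC0(0,2,0)] with z dropped:
    // the macro flattens (x, y) to x + xdim * y, so row 0 copies -row 2.
    for (int i = 0; i < xdim; i++)
        yvel[i + xdim * 0] = -yvel[i + xdim * 2];

    for (int i = 0; i < xdim; i++)
        printf("%7.1f", yvel[i]);                // -20.0 -21.0 -22.0 -23.0
    printf("\n");
    return 0;
}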
b19941cd0ed5d7e3c7072276cf77ef9d90ddb6a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <LinearLayerGPU.h> #include <GpuUtils.h> #include <algorithm> #define trPerBlock 1024 template <typename T> __global__ void k_dense_forward(T* output, const T* weights, const T* input, size_t inputSize, const T* bias, size_t outputSize) { const unsigned int batch_offset_output = blockIdx.y * outputSize; const unsigned int batch_offset_input = blockIdx.y * inputSize; const auto i = blockIdx.x * blockDim.x + threadIdx.x; if(i < outputSize) { float result = 0.0f; for (int j = 0; j < inputSize; j++) { //result = __fmaf_rn(input[j], weights[i * inputSize + j], result); // very fast multiply add = a*b + c result += input[batch_offset_input + j] * weights[i * inputSize + j]; } output[batch_offset_output + i] = result + bias[i]; } } void linearLayerForwardPassGPU(float* output,const float* weights, const float* input, const shape& input_shape, const float* bias, const shape& output_shape) { auto threadsPerBlock = static_cast<unsigned int>(::min(output_shape.width, static_cast<size_t>(trPerBlock))); auto num_of_blocks = utils::getBlockSize(threadsPerBlock, output_shape.width); dim3 blocks(num_of_blocks, output_shape.batches); k_dense_forward<float> << <blocks, threadsPerBlock >> > (output, weights, input, input_shape.width, bias, output_shape.width); utils::waitAndCheckForErrors(); } template <typename T> __global__ void k_calcDerivativeWRtoInput(T* derivativeWRtoInput, size_t inputSize, const T* derivateWRtoOutput, size_t outputSize, const T* weights) { auto inputIndex = blockIdx.x * blockDim.x + threadIdx.x; if (inputIndex < inputSize) { derivativeWRtoInput[inputIndex] = 0.0f; for (size_t i = 0; i < outputSize; i++) { derivativeWRtoInput[inputIndex] += derivateWRtoOutput[i] * weights[i * inputSize + inputIndex]; } } } void calcDerivativeWRtoInput(float* derivativeWRtoInput, size_t inputSize, const float* derivateWRtoOutput, shape output_shape, const float* weights) { auto threadsPerBlock = static_cast<unsigned int>(::min(inputSize, static_cast<size_t>(trPerBlock))); auto blocks = utils::getBlockSize(threadsPerBlock, inputSize); std::vector<hipStream_t> streams; size_t outputSize = output_shape.volume(); streams.resize(output_shape.batches); for (size_t i = 0; i < streams.size(); i++) { hipStreamCreate(&streams[i]); k_calcDerivativeWRtoInput << <blocks, threadsPerBlock,0, streams[i]>> > (derivativeWRtoInput + i * inputSize, inputSize, derivateWRtoOutput + i * output_shape.volume(), outputSize, weights); } utils::waitAndCheckForErrors(); for (size_t i = 0; i < streams.size(); i++) { hipStreamDestroy(streams[i]); } } template <typename T> __global__ void k_updateWeights(const T* weights, T* weights_deriv, const T* derivativeWRtoOutput,const T* input, size_t inputSize, size_t outputSize, size_t batches, size_t out_offset) { size_t weightIndex = blockIdx.x * blockDim.x + threadIdx.x; size_t derivOutIdx = weightIndex / inputSize; size_t inpIdx = weightIndex - derivOutIdx * inputSize; if(weightIndex < (inputSize * outputSize)) { float error = 0.0f; for (int i = 0; i < batches; i++) { error += input[inpIdx + i * inputSize] * derivativeWRtoOutput[derivOutIdx + i * out_offset]; } weights_deriv[weightIndex] = error; } } void calcWeightsDeriv(const float* weights, float* weights_deriv, const float* derivativeWRtoOutput, const float* input, size_t inputSize, size_t outputSize, shape output_shape) { auto threadsPerBlock = static_cast<unsigned int>(::min(outputSize * inputSize, 
static_cast<size_t>(trPerBlock))); auto blocks = utils::getBlockSize(threadsPerBlock, outputSize * inputSize); k_updateWeights << <blocks, threadsPerBlock >> > (weights, weights_deriv, derivativeWRtoOutput, input, inputSize, outputSize, output_shape.batches, output_shape.volume()); utils::waitAndCheckForErrors(); } template <typename T> __global__ void k_updateBias(const T* bias, T* bias_deriv, const T* derivative_wr_to_out, size_t output_size, size_t batches, size_t out_offset) { size_t biasIndex = blockIdx.x * blockDim.x + threadIdx.x; if (biasIndex < output_size) { float error = 0.0f; for (int i = 0; i < batches; i++) { error += derivative_wr_to_out[biasIndex + i * out_offset]; } bias_deriv[biasIndex] = error; } } void calcBiasDeriv(const float* bias, float* bias_deriv, const float* derivative_wr_to_out, size_t output_size, shape output_shape) { auto threadsPerBlock = static_cast<unsigned int>(::min(output_size, static_cast<size_t>(trPerBlock))); auto blocks = utils::getBlockSize(threadsPerBlock, output_size); k_updateBias << <blocks, threadsPerBlock >> > (bias, bias_deriv, derivative_wr_to_out, output_size, output_shape.batches, output_shape.volume()); utils::waitAndCheckForErrors(); }
b19941cd0ed5d7e3c7072276cf77ef9d90ddb6a6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <LinearLayerGPU.h> #include <GpuUtils.h> #include <algorithm> #define trPerBlock 1024 template <typename T> __global__ void k_dense_forward(T* output, const T* weights, const T* input, size_t inputSize, const T* bias, size_t outputSize) { const unsigned int batch_offset_output = blockIdx.y * outputSize; const unsigned int batch_offset_input = blockIdx.y * inputSize; const auto i = blockIdx.x * blockDim.x + threadIdx.x; if(i < outputSize) { float result = 0.0f; for (int j = 0; j < inputSize; j++) { //result = __fmaf_rn(input[j], weights[i * inputSize + j], result); // very fast multiply add = a*b + c result += input[batch_offset_input + j] * weights[i * inputSize + j]; } output[batch_offset_output + i] = result + bias[i]; } } void linearLayerForwardPassGPU(float* output,const float* weights, const float* input, const shape& input_shape, const float* bias, const shape& output_shape) { auto threadsPerBlock = static_cast<unsigned int>(std::min(output_shape.width, static_cast<size_t>(trPerBlock))); auto num_of_blocks = utils::getBlockSize(threadsPerBlock, output_shape.width); dim3 blocks(num_of_blocks, output_shape.batches); k_dense_forward<float> << <blocks, threadsPerBlock >> > (output, weights, input, input_shape.width, bias, output_shape.width); utils::waitAndCheckForErrors(); } template <typename T> __global__ void k_calcDerivativeWRtoInput(T* derivativeWRtoInput, size_t inputSize, const T* derivateWRtoOutput, size_t outputSize, const T* weights) { auto inputIndex = blockIdx.x * blockDim.x + threadIdx.x; if (inputIndex < inputSize) { derivativeWRtoInput[inputIndex] = 0.0f; for (size_t i = 0; i < outputSize; i++) { derivativeWRtoInput[inputIndex] += derivateWRtoOutput[i] * weights[i * inputSize + inputIndex]; } } } void calcDerivativeWRtoInput(float* derivativeWRtoInput, size_t inputSize, const float* derivateWRtoOutput, shape output_shape, const float* weights) { auto threadsPerBlock = static_cast<unsigned int>(std::min(inputSize, static_cast<size_t>(trPerBlock))); auto blocks = utils::getBlockSize(threadsPerBlock, inputSize); std::vector<cudaStream_t> streams; size_t outputSize = output_shape.volume(); streams.resize(output_shape.batches); for (size_t i = 0; i < streams.size(); i++) { cudaStreamCreate(&streams[i]); k_calcDerivativeWRtoInput << <blocks, threadsPerBlock,0, streams[i]>> > (derivativeWRtoInput + i * inputSize, inputSize, derivateWRtoOutput + i * output_shape.volume(), outputSize, weights); } utils::waitAndCheckForErrors(); for (size_t i = 0; i < streams.size(); i++) { cudaStreamDestroy(streams[i]); } } template <typename T> __global__ void k_updateWeights(const T* weights, T* weights_deriv, const T* derivativeWRtoOutput,const T* input, size_t inputSize, size_t outputSize, size_t batches, size_t out_offset) { size_t weightIndex = blockIdx.x * blockDim.x + threadIdx.x; size_t derivOutIdx = weightIndex / inputSize; size_t inpIdx = weightIndex - derivOutIdx * inputSize; if(weightIndex < (inputSize * outputSize)) { float error = 0.0f; for (int i = 0; i < batches; i++) { error += input[inpIdx + i * inputSize] * derivativeWRtoOutput[derivOutIdx + i * out_offset]; } weights_deriv[weightIndex] = error; } } void calcWeightsDeriv(const float* weights, float* weights_deriv, const float* derivativeWRtoOutput, const float* input, size_t inputSize, size_t outputSize, shape output_shape) { auto threadsPerBlock = static_cast<unsigned int>(std::min(outputSize * inputSize, static_cast<size_t>(trPerBlock))); auto blocks = 
utils::getBlockSize(threadsPerBlock, outputSize * inputSize); k_updateWeights << <blocks, threadsPerBlock >> > (weights, weights_deriv, derivativeWRtoOutput, input, inputSize, outputSize, output_shape.batches, output_shape.volume()); utils::waitAndCheckForErrors(); } template <typename T> __global__ void k_updateBias(const T* bias, T* bias_deriv, const T* derivative_wr_to_out, size_t output_size, size_t batches, size_t out_offset) { size_t biasIndex = blockIdx.x * blockDim.x + threadIdx.x; if (biasIndex < output_size) { float error = 0.0f; for (int i = 0; i < batches; i++) { error += derivative_wr_to_out[biasIndex + i * out_offset]; } bias_deriv[biasIndex] = error; } } void calcBiasDeriv(const float* bias, float* bias_deriv, const float* derivative_wr_to_out, size_t output_size, shape output_shape) { auto threadsPerBlock = static_cast<unsigned int>(std::min(output_size, static_cast<size_t>(trPerBlock))); auto blocks = utils::getBlockSize(threadsPerBlock, output_size); k_updateBias << <blocks, threadsPerBlock >> > (bias, bias_deriv, derivative_wr_to_out, output_size, output_shape.batches, output_shape.volume()); utils::waitAndCheckForErrors(); }
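The forward kernel k_dense_forward above assigns one thread per output unit and one grid row (blockIdx.y) per batch element, so the value it computes is a plain dot product plus a bias. A CPU reference of that formula can help when checking kernel output; the sketch below assumes the same row-major weight layout (row o holds the weights of output unit o) and uses std::vector rather than the project's shape/GpuUtils types, which are not needed for the arithmetic.

#include <vector>

// output: batches x outputSize, input: batches x inputSize,
// weights: outputSize x inputSize, bias: outputSize.
void dense_forward_cpu(std::vector<float> &output, const std::vector<float> &weights,
                       const std::vector<float> &input, const std::vector<float> &bias,
                       int batches, int inputSize, int outputSize) {
    for (int b = 0; b < batches; b++) {
        for (int o = 0; o < outputSize; o++) {
            float acc = 0.0f;
            for (int j = 0; j < inputSize; j++)
                acc += input[b * inputSize + j] * weights[o * inputSize + j];
            // Same expression as the kernel: accumulated dot product plus bias[o],
            // written to the batch row's output slot.
            output[b * outputSize + o] = acc + bias[o];
        }
    }
}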
94bb87c7a39c45fdd0bc5fcaefa5e12f78d80ee3.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to use split-k version of matrix multiplication using functions and data structures provided by CUTLASS; which we run on a NVIDIA Volta GPU. What is split-k? Consider a problem size of M = 128, N = 128, K = 4096. In this case, if my thread-block tile size (a tile can be viewed as a 2d matrix) is 128x128x4096, then we launch a singled a thread-block taking up a single SM of 84 SMs present on V100. Hence the efficiency of computation is really low. So, how to solve it? This is where split-k comes in. It is a way of partitioning K-dimension of matrix multiplication and distribute across multiple SMs and get better efficiency than single SM. In the above example, we can partition K-dimension with split-k factor of 16 i.e., thread-block tile size will be 128x128x256 and will be launching on 16 SMs. Once each thread-block computes their partial inner product (1/16th of output), they accumulate to single output matrix. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. 
Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = float. As we want to MMA instructions on Volta and they support only half-precision floating point (fp16 or half), we use data type for elements in input matrix A and B as cutlass::half_t. Volta also supports accumulation of partial dot product to fp32, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (float), ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t), ElementOutput (float). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x32, 64x64x4, 8x8x4 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::GemmSplitKParallel template. The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. 
Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_splitk_parallel.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and // computation between elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. // Column Major for Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular // SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm70; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, // N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, // N = 8, K = 4 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes ? using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>:: value, // <- This is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. 
This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear // combination function // Put all the created template variables to create GemmSplitKParallel template // variable using Gemm = cutlass::gemm::device::GemmSplitKParallel< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp>; int run() { hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (props.major != 7) { std::cerr << "Volta Tensor Ops must be run on a machine with compute " "capability of 70, 72, or 75." << std::endl; // Return 0 so tests pass if run on unsupported architectures or CUDA // Toolkits. return 0; } // // Define problem size // const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N // used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N // used to store output from reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random // data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random // data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random // data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on // host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 16 partitions int split_k_slices = 16; // Create a tuple of gemm kernel arguments. 
This is later passed as // arguments to launch instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix // multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // // Volta Tensor Core operations exposed with mma.sync are first available in // CUDA 10.1. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA " "10.1 Toolkit or later." << std::endl; // Returning zero, so this test passes when built with older CUDA // Toolkits. Its action are no-op. return 0; } else { return run(); } }
94bb87c7a39c45fdd0bc5fcaefa5e12f78d80ee3.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to use split-k version of matrix multiplication using functions and data structures provided by CUTLASS; which we run on a NVIDIA Volta GPU. What is split-k? Consider a problem size of M = 128, N = 128, K = 4096. In this case, if my thread-block tile size (a tile can be viewed as a 2d matrix) is 128x128x4096, then we launch a singled a thread-block taking up a single SM of 84 SMs present on V100. Hence the efficiency of computation is really low. So, how to solve it? This is where split-k comes in. It is a way of partitioning K-dimension of matrix multiplication and distribute across multiple SMs and get better efficiency than single SM. In the above example, we can partition K-dimension with split-k factor of 16 i.e., thread-block tile size will be 128x128x256 and will be launching on 16 SMs. Once each thread-block computes their partial inner product (1/16th of output), they accumulate to single output matrix. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). 
In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = float. As we want to MMA instructions on Volta and they support only half-precision floating point (fp16 or half), we use data type for elements in input matrix A and B as cutlass::half_t. Volta also supports accumulation of partial dot product to fp32, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (float), ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t), ElementOutput (float). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x32, 64x64x4, 8x8x4 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, intialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::GemmSplitKParallel template. The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. 
Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to intialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_splitk_parallel.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and // computation between elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. // Column Major for Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular // SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm70; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, // N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, // N = 8, K = 4 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes ? using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>:: value, // <- This is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. 
This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear // combination function // Put all the created template variables to create GemmSplitKParallel template // variable using Gemm = cutlass::gemm::device::GemmSplitKParallel< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp>; int run() { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major != 7) { std::cerr << "Volta Tensor Ops must be run on a machine with compute " "capability of 70, 72, or 75." << std::endl; // Return 0 so tests pass if run on unsupported architectures or CUDA // Toolkits. return 0; } // // Define problem size // const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N // used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N // used to store output from reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random // data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random // data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random // data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on // host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 16 partitions int split_k_slices = 16; // Create a tuple of gemm kernel arguments. 
This is later passed as // arguments to launch instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix // multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // // Volta Tensor Core operations exposed with mma.sync are first available in // CUDA 10.1. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA " "10.1 Toolkit or later." << std::endl; // Returning zero, so this test passes when built with older CUDA // Toolkits. Its action are no-op. return 0; } else { return run(); } }
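The comment block in this example describes split-k as partitioning the K dimension, letting each partition compute a partial inner product, and accumulating the partials into one output. That reduction is easiest to see for a single output element; the scalar sketch below uses toy sizes and no CUTLASS APIs, only to show that the sliced sum equals the unsliced one.

#include <cstdio>

int main() {
    const int K = 16, split_k_slices = 4, slice = K / split_k_slices;
    float a[K], b[K];
    for (int k = 0; k < K; k++) { a[k] = 1.0f + k; b[k] = 0.5f; }

    float full = 0.0f;
    for (int k = 0; k < K; k++) full += a[k] * b[k];         // one worker covers all of K

    float partial[split_k_slices] = {0};
    for (int s = 0; s < split_k_slices; s++)                 // each slice ~ one partition of K
        for (int k = s * slice; k < (s + 1) * slice; k++)
            partial[s] += a[k] * b[k];

    float reduced = 0.0f;
    for (int s = 0; s < split_k_slices; s++) reduced += partial[s];

    printf("full = %.1f, split-k reduced = %.1f\n", full, reduced);  // both 68.0
    return 0;
}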
a4d30596efbcec9692699105097d9fcc67794d59.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmin)(THCState *state, THCTensor 
*self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(result, src); #endif } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real) 
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } #endif namespace { c10::intrusive_ptr<at::TensorImpl, 
at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, 
TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
a4d30596efbcec9692699105097d9fcc67794d59.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { 
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(result, src); #endif } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, 
THCNumerics<scalar_t>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } #endif namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> 
retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, 
TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
2b119a53323684b093edcac237a59054c6dd2a61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Host Side Code for Cross-correlation in GPU #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include "corr2cuda.h" #include "normXcorr_GPUKernel_Cuda.cu" using namespace std; Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix_Pre(int height, int width,int init); Matrix AllocateMatrix_Post(int height, int width,int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); bool CompareResults(float* A, float* B, int elements, float eps); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters,float *quality,float *dpX, float *dpY,int *dpX_H,int *dpY_H); int main(int argc,char** argv) { // Input Parameters if(argc!=11) // Both Pre and Post Padded Image Size should be provided as Input { printf("Usage %s Parameters missing\n",argv[0]); return 1; } int imageWidth_pre = atoi(argv[1]); int imageHeight_pre = atoi(argv[2]); int imageWidth_post = atoi(argv[3]); int imageHeight_post = atoi(argv[4]); int SEARCH_X = atoi(argv[5]); int SEARCH_Y = atoi(argv[6]); int KERNEL_X = atoi(argv[7]); int KERNEL_Y = atoi(argv[8]); int numX = atoi(argv[9]); int numY = atoi(argv[10]); int DisplacementSize = numX*numY; int Corr_size = SEARCH_X*SEARCH_Y; Matrix Pre; Matrix Post; float OVERLAP = 50.0; params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY}; Pre = AllocateMatrix_Pre(imageHeight_pre,imageWidth_pre, 1); Post = AllocateMatrix_Post(imageHeight_post,imageWidth_post, 1); float gpuTime=0.f; float *CorrH; hipHostMalloc((void**)&CorrH, Corr_size*DisplacementSize*sizeof(float)); //CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float)); float *quality; quality = (float*)malloc(DisplacementSize*sizeof(float)); float *dpX; dpX = (float*)malloc(DisplacementSize*sizeof(float)); float *dpY; dpY = (float*)malloc(DisplacementSize*sizeof(float)); int* dpX_H; dpX_H = (int*)malloc(sizeof(int)*parameters.numX*parameters.numY); int* dpY_H; dpY_H = (int*)malloc(sizeof(int)*parameters.numX*parameters.numY); float elapsedTime_inc; hipEvent_t startEvent_inc, stopEvent_inc; hipEventCreate(&startEvent_inc); hipEventCreate(&stopEvent_inc); hipEventRecord(startEvent_inc,0); // starting timing for inclusive CorrelationOnDevice(Pre, Post, CorrH, parameters,quality,dpX,dpY,dpX_H,dpY_H); // Execution Model for GPU is set up in this function hipEventRecord(stopEvent_inc,0); //ending timing for inclusive hipEventSynchronize(stopEvent_inc); hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc); gpuTime = elapsedTime_inc; //for(int h=0;h<DisplacementSize;h++){ /*int h = DisplacementSize - 1; for(int g=0;g<SEARCH_Y;g++){ for(int z=0;z<SEARCH_X;z++){ printf("%f ",CorrH[(h*SEARCH_Y+g)*SEARCH_X+z]); } printf("\n"); } printf("\n");*/ //} float cp_dpY = 0.1464; float cp_dpX = -.0992; int u = DisplacementSize -1; printf("dpY_CPU = %0.4f\ndpY_GPU = %0.4f\ndpX_CPU = %0.4f\ndpX_GPU = %0.4f\n",cp_dpY,dpY[u],cp_dpX,dpX[u]); /*for(int u=0;u<DisplacementSize;u++) { printf(" %d %f %f %f\n",u,quality[u],dpY[u],dpX[u]); }*/ /*for(int u=0;u<parameters.numX*parameters.numY;u++) { printf("%d %d\n",dpY_H[u],dpX_H[u]); }*/ printf("Elasped Time = %f\n",gpuTime); // Free matrices FreeMatrix(&Pre); FreeMatrix(&Post); return 0; } //// Cuda Kernel Call ////// void CorrelationOnDevice(const Matrix Pre, const 
Matrix Post, float *CorrH, params parameters,float *quality,float *dpX, float *dpY,int *dpX_H,int *dpY_H) { // Load Pre and Post to the device Matrix Pred = AllocateDeviceMatrix(Pre); CopyToDeviceMatrix(Pred, Pre); Matrix Postd = AllocateDeviceMatrix(Post); CopyToDeviceMatrix(Postd, Post); // Allocate Space for Pre-Mean float *preMean; float *preVar; hipMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY); hipMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY); //Allocate Space for Post-mean float *postMean; float *postVar; hipMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY); hipMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY); // Device Memory Allocation for Cross-correlation Result float *CorrD; hipMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY); //Initialize Values for Displacement Results float *qualityD; hipMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY); int *dpX_D; hipMalloc((void **)&dpX_D,sizeof(int)*parameters.numX*parameters.numY); int *dpY_D; hipMalloc((void **)&dpY_D,sizeof(int)*parameters.numX*parameters.numY); float *dpX_sD; hipMalloc((void **)&dpX_sD,sizeof(float)*parameters.numX*parameters.numY); float *dpY_sD; hipMalloc((void **)&dpY_sD,sizeof(float)*parameters.numX*parameters.numY); // Setup the execution configuration dim3 dimBlock(parameters.searchX, parameters.searchY); dim3 dimGrid(parameters.numX, parameters.numY); // Launch the device computation threads! // Kernel Call for NCC Calculation hipLaunchKernelGGL(( normXcorr_GPU), dim3(dimGrid), dim3(dimBlock), parameters.kernelX*parameters.kernelY*sizeof(float), 0, Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar); // Kernel Call for Peak Finding for CC Results int smemSize = 1024*sizeof(int)+1024*sizeof(float); hipLaunchKernelGGL(( MaxElement), dim3(dimGrid),dim3(1024),smemSize, 0, CorrD,parameters,qualityD,dpX_D,dpY_D); // Subsample Estimation int numthreads = 512; int numblocks = (parameters.numX*parameters.numY + numthreads -1)/numthreads; hipLaunchKernelGGL(( subsample), dim3(numblocks),dim3(numthreads), 0, 0, CorrD,parameters,qualityD,dpX_D,dpY_D,dpX_sD,dpY_sD); // Copying the Results from GPU to CPU //hipMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,hipMemcpyDeviceToHost); hipMemcpy(quality,qualityD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost); hipMemcpy(dpX_H,dpX_D,sizeof(int)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost); hipMemcpy(dpY_H,dpY_D,sizeof(int)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost); hipMemcpy(dpX,dpX_sD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost); hipMemcpy(dpY,dpY_sD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost); // Free device matrices FreeDeviceMatrix(&Pred); FreeDeviceMatrix(&Postd); hipFree(CorrD); hipFree(qualityD); hipFree(dpY_D); hipFree(dpX_D); hipFree(dpY_sD); hipFree(dpX_sD); } // Allocate a device matrix of same size as M. 
Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } Matrix AllocateMatrix_Pre(int height, int width,int init) // 1 is file read/ 0 is just allocation { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; FILE *fp; fp = fopen("Pre_RF.inp","r"); hipHostMalloc((void**)&M.elements, size*sizeof(float)); if(init) { for(unsigned int i = 0; i < M.width * M.height; i++) { fscanf(fp,"%f",&M.elements[i]); } } return M; } Matrix AllocateMatrix_Post(int height, int width,int init) // 1 is file read/ 0 is just allocation { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; FILE *fp; fp = fopen("Post_RF.inp","r"); hipHostMalloc((void**)&M.elements, size*sizeof(float)); if(init) { for(unsigned int i = 0; i < M.width * M.height; i++) { fscanf(fp,"%f",&M.elements[i]); } } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { //free(M->elements); hipHostFree(M->elements); M->elements = NULL; }
2b119a53323684b093edcac237a59054c6dd2a61.cu
// Host Side Code for Cross-correlation in GPU #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include "corr2cuda.h" #include "normXcorr_GPUKernel_Cuda.cu" using namespace std; Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix_Pre(int height, int width,int init); Matrix AllocateMatrix_Post(int height, int width,int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); bool CompareResults(float* A, float* B, int elements, float eps); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters,float *quality,float *dpX, float *dpY,int *dpX_H,int *dpY_H); int main(int argc,char** argv) { // Input Parameters if(argc!=11) // Both Pre and Post Padded Image Size should be provided as Input { printf("Usage %s Parameters missing\n",argv[0]); return 1; } int imageWidth_pre = atoi(argv[1]); int imageHeight_pre = atoi(argv[2]); int imageWidth_post = atoi(argv[3]); int imageHeight_post = atoi(argv[4]); int SEARCH_X = atoi(argv[5]); int SEARCH_Y = atoi(argv[6]); int KERNEL_X = atoi(argv[7]); int KERNEL_Y = atoi(argv[8]); int numX = atoi(argv[9]); int numY = atoi(argv[10]); int DisplacementSize = numX*numY; int Corr_size = SEARCH_X*SEARCH_Y; Matrix Pre; Matrix Post; float OVERLAP = 50.0; params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY}; Pre = AllocateMatrix_Pre(imageHeight_pre,imageWidth_pre, 1); Post = AllocateMatrix_Post(imageHeight_post,imageWidth_post, 1); float gpuTime=0.f; float *CorrH; cudaMallocHost((void**)&CorrH, Corr_size*DisplacementSize*sizeof(float)); //CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float)); float *quality; quality = (float*)malloc(DisplacementSize*sizeof(float)); float *dpX; dpX = (float*)malloc(DisplacementSize*sizeof(float)); float *dpY; dpY = (float*)malloc(DisplacementSize*sizeof(float)); int* dpX_H; dpX_H = (int*)malloc(sizeof(int)*parameters.numX*parameters.numY); int* dpY_H; dpY_H = (int*)malloc(sizeof(int)*parameters.numX*parameters.numY); float elapsedTime_inc; cudaEvent_t startEvent_inc, stopEvent_inc; cudaEventCreate(&startEvent_inc); cudaEventCreate(&stopEvent_inc); cudaEventRecord(startEvent_inc,0); // starting timing for inclusive CorrelationOnDevice(Pre, Post, CorrH, parameters,quality,dpX,dpY,dpX_H,dpY_H); // Execution Model for GPU is set up in this function cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive cudaEventSynchronize(stopEvent_inc); cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc); gpuTime = elapsedTime_inc; //for(int h=0;h<DisplacementSize;h++){ /*int h = DisplacementSize - 1; for(int g=0;g<SEARCH_Y;g++){ for(int z=0;z<SEARCH_X;z++){ printf("%f ",CorrH[(h*SEARCH_Y+g)*SEARCH_X+z]); } printf("\n"); } printf("\n");*/ //} float cp_dpY = 0.1464; float cp_dpX = -.0992; int u = DisplacementSize -1; printf("dpY_CPU = %0.4f\ndpY_GPU = %0.4f\ndpX_CPU = %0.4f\ndpX_GPU = %0.4f\n",cp_dpY,dpY[u],cp_dpX,dpX[u]); /*for(int u=0;u<DisplacementSize;u++) { printf(" %d %f %f %f\n",u,quality[u],dpY[u],dpX[u]); }*/ /*for(int u=0;u<parameters.numX*parameters.numY;u++) { printf("%d %d\n",dpY_H[u],dpX_H[u]); }*/ printf("Elasped Time = %f\n",gpuTime); // Free matrices FreeMatrix(&Pre); FreeMatrix(&Post); return 0; } //// Cuda Kernel Call ////// void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters,float *quality,float *dpX, float 
*dpY,int *dpX_H,int *dpY_H) { // Load Pre and Post to the device Matrix Pred = AllocateDeviceMatrix(Pre); CopyToDeviceMatrix(Pred, Pre); Matrix Postd = AllocateDeviceMatrix(Post); CopyToDeviceMatrix(Postd, Post); // Allocate Space for Pre-Mean float *preMean; float *preVar; cudaMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY); cudaMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY); //Allocate Space for Post-mean float *postMean; float *postVar; cudaMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY); cudaMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY); // Device Memory Allocation for Cross-correlation Result float *CorrD; cudaMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY); //Initialize Values for Displacement Results float *qualityD; cudaMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY); int *dpX_D; cudaMalloc((void **)&dpX_D,sizeof(int)*parameters.numX*parameters.numY); int *dpY_D; cudaMalloc((void **)&dpY_D,sizeof(int)*parameters.numX*parameters.numY); float *dpX_sD; cudaMalloc((void **)&dpX_sD,sizeof(float)*parameters.numX*parameters.numY); float *dpY_sD; cudaMalloc((void **)&dpY_sD,sizeof(float)*parameters.numX*parameters.numY); // Setup the execution configuration dim3 dimBlock(parameters.searchX, parameters.searchY); dim3 dimGrid(parameters.numX, parameters.numY); // Launch the device computation threads! // Kernel Call for NCC Calculation normXcorr_GPU<<<dimGrid, dimBlock, parameters.kernelX*parameters.kernelY*sizeof(float)>>>(Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar); // Kernel Call for Peak Finding for CC Results int smemSize = 1024*sizeof(int)+1024*sizeof(float); MaxElement<<<dimGrid,1024,smemSize>>>(CorrD,parameters,qualityD,dpX_D,dpY_D); // Subsample Estimation int numthreads = 512; int numblocks = (parameters.numX*parameters.numY + numthreads -1)/numthreads; subsample<<<numblocks,numthreads>>>(CorrD,parameters,qualityD,dpX_D,dpY_D,dpX_sD,dpY_sD); // Copying the Results from GPU to CPU //cudaMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,cudaMemcpyDeviceToHost); cudaMemcpy(quality,qualityD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost); cudaMemcpy(dpX_H,dpX_D,sizeof(int)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost); cudaMemcpy(dpY_H,dpY_D,sizeof(int)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost); cudaMemcpy(dpX,dpX_sD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost); cudaMemcpy(dpY,dpY_sD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost); // Free device matrices FreeDeviceMatrix(&Pred); FreeDeviceMatrix(&Postd); cudaFree(CorrD); cudaFree(qualityD); cudaFree(dpY_D); cudaFree(dpX_D); cudaFree(dpY_sD); cudaFree(dpX_sD); } // Allocate a device matrix of same size as M. 
Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } Matrix AllocateMatrix_Pre(int height, int width,int init) // 1 is file read/ 0 is just allocation { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; FILE *fp; fp = fopen("Pre_RF.inp","r"); cudaMallocHost((void**)&M.elements, size*sizeof(float)); if(init) { for(unsigned int i = 0; i < M.width * M.height; i++) { fscanf(fp,"%f",&M.elements[i]); } } return M; } Matrix AllocateMatrix_Post(int height, int width,int init) // 1 is file read/ 0 is just allocation { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; FILE *fp; fp = fopen("Post_RF.inp","r"); cudaMallocHost((void**)&M.elements, size*sizeof(float)); if(init) { for(unsigned int i = 0; i < M.width * M.height; i++) { fscanf(fp,"%f",&M.elements[i]); } } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { //free(M->elements); cudaFreeHost(M->elements); M->elements = NULL; }
2ae4f892f0374a630b8165ed111a486410381ccc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Implementing histogram equalization in CUDA // @Jiangyan Feng, [email protected] // Sparse Matrix Vector Multiplication (JDS) #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols, int *matRowPerm, int *matRows, float *matData, float *vec, int dim) { //@@ insert spmv kernel for jds format int row = blockIdx.x * blockDim.x + threadIdx.x; if (row < dim){ float dot = 0; int ele = 0; while (ele < matRows[row]){ int row_idx = matColStart[ele] + row; int col_idx = matCols[matColStart[ele] + row]; dot += matData[row_idx] * vec[col_idx]; ele ++; } out[matRowPerm[row]] = dot; } } static void spmvJDS(float *out, int *matColStart, int *matCols, int *matRowPerm, int *matRows, float *matData, float *vec, int dim) { //@@ invoke spmv kernel for jds format dim3 dimGrid(ceil(dim/(256.0)), 1, 1); dim3 dimBlock(256, 1, 1); hipLaunchKernelGGL(( spmvJDSKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, out, matColStart, matCols, matRowPerm, matRows, matData, vec, dim); } int main(int argc, char **argv) { wbArg_t args; int *hostCSRCols; int *hostCSRRows; float *hostCSRData; int *hostJDSColStart; int *hostJDSCols; int *hostJDSRowPerm; int *hostJDSRows; float *hostJDSData; float *hostVector; float *hostOutput; int *deviceJDSColStart; int *deviceJDSCols; int *deviceJDSRowPerm; int *deviceJDSRows; float *deviceJDSData; float *deviceVector; float *deviceOutput; int dim, ncols, nrows, ndata; int maxRowNNZ; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 0), &ncols, "Integer"); hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 1), &nrows, "Integer"); hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 2), &ndata, "Real"); hostVector = (float *)wbImport(wbArg_getInputFile(args, 3), &dim, "Real"); hostOutput = (float *)malloc(sizeof(float) * dim); wbTime_stop(Generic, "Importing data and creating memory on host"); CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm, &hostJDSRows, &hostJDSColStart, &hostJDSCols, &hostJDSData); maxRowNNZ = hostJDSRows[0]; wbTime_start(GPU, "Allocating GPU memory."); hipMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ); hipMalloc((void **)&deviceJDSCols, sizeof(int) * ndata); hipMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim); hipMalloc((void **)&deviceJDSRows, sizeof(int) * dim); hipMalloc((void **)&deviceJDSData, sizeof(float) * ndata); hipMalloc((void **)&deviceVector, sizeof(float) * dim); hipMalloc((void **)&deviceOutput, sizeof(float) * dim); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); hipMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ, hipMemcpyHostToDevice); hipMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata, hipMemcpyHostToDevice); hipMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim, hipMemcpyHostToDevice); hipMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim, hipMemcpyHostToDevice); hipMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata, hipMemcpyHostToDevice); hipMemcpy(deviceVector, hostVector, sizeof(float) * dim, hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); 
wbTime_start(Compute, "Performing CUDA computation"); spmvJDS(deviceOutput, deviceJDSColStart, deviceJDSCols, deviceJDSRowPerm, deviceJDSRows, deviceJDSData, deviceVector, dim); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); hipMemcpy(hostOutput, deviceOutput, sizeof(float) * dim, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); hipFree(deviceVector); hipFree(deviceOutput); hipFree(deviceJDSColStart); hipFree(deviceJDSCols); hipFree(deviceJDSRowPerm); hipFree(deviceJDSRows); hipFree(deviceJDSData); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, dim); free(hostCSRCols); free(hostCSRRows); free(hostCSRData); free(hostVector); free(hostOutput); free(hostJDSColStart); free(hostJDSCols); free(hostJDSRowPerm); free(hostJDSRows); free(hostJDSData); return 0; }
2ae4f892f0374a630b8165ed111a486410381ccc.cu
// Implementing histogram equalization in CUDA // @Jiangyan Feng, [email protected] // Sparse Matrix Vector Multiplication (JDS) #include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void spmvJDSKernel(float *out, int *matColStart, int *matCols, int *matRowPerm, int *matRows, float *matData, float *vec, int dim) { //@@ insert spmv kernel for jds format int row = blockIdx.x * blockDim.x + threadIdx.x; if (row < dim){ float dot = 0; int ele = 0; while (ele < matRows[row]){ int row_idx = matColStart[ele] + row; int col_idx = matCols[matColStart[ele] + row]; dot += matData[row_idx] * vec[col_idx]; ele ++; } out[matRowPerm[row]] = dot; } } static void spmvJDS(float *out, int *matColStart, int *matCols, int *matRowPerm, int *matRows, float *matData, float *vec, int dim) { //@@ invoke spmv kernel for jds format dim3 dimGrid(ceil(dim/(256.0)), 1, 1); dim3 dimBlock(256, 1, 1); spmvJDSKernel<<<dimGrid, dimBlock>>>(out, matColStart, matCols, matRowPerm, matRows, matData, vec, dim); } int main(int argc, char **argv) { wbArg_t args; int *hostCSRCols; int *hostCSRRows; float *hostCSRData; int *hostJDSColStart; int *hostJDSCols; int *hostJDSRowPerm; int *hostJDSRows; float *hostJDSData; float *hostVector; float *hostOutput; int *deviceJDSColStart; int *deviceJDSCols; int *deviceJDSRowPerm; int *deviceJDSRows; float *deviceJDSData; float *deviceVector; float *deviceOutput; int dim, ncols, nrows, ndata; int maxRowNNZ; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostCSRCols = (int *)wbImport(wbArg_getInputFile(args, 0), &ncols, "Integer"); hostCSRRows = (int *)wbImport(wbArg_getInputFile(args, 1), &nrows, "Integer"); hostCSRData = (float *)wbImport(wbArg_getInputFile(args, 2), &ndata, "Real"); hostVector = (float *)wbImport(wbArg_getInputFile(args, 3), &dim, "Real"); hostOutput = (float *)malloc(sizeof(float) * dim); wbTime_stop(Generic, "Importing data and creating memory on host"); CSRToJDS(dim, hostCSRRows, hostCSRCols, hostCSRData, &hostJDSRowPerm, &hostJDSRows, &hostJDSColStart, &hostJDSCols, &hostJDSData); maxRowNNZ = hostJDSRows[0]; wbTime_start(GPU, "Allocating GPU memory."); cudaMalloc((void **)&deviceJDSColStart, sizeof(int) * maxRowNNZ); cudaMalloc((void **)&deviceJDSCols, sizeof(int) * ndata); cudaMalloc((void **)&deviceJDSRowPerm, sizeof(int) * dim); cudaMalloc((void **)&deviceJDSRows, sizeof(int) * dim); cudaMalloc((void **)&deviceJDSData, sizeof(float) * ndata); cudaMalloc((void **)&deviceVector, sizeof(float) * dim); cudaMalloc((void **)&deviceOutput, sizeof(float) * dim); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); cudaMemcpy(deviceJDSColStart, hostJDSColStart, sizeof(int) * maxRowNNZ, cudaMemcpyHostToDevice); cudaMemcpy(deviceJDSCols, hostJDSCols, sizeof(int) * ndata, cudaMemcpyHostToDevice); cudaMemcpy(deviceJDSRowPerm, hostJDSRowPerm, sizeof(int) * dim, cudaMemcpyHostToDevice); cudaMemcpy(deviceJDSRows, hostJDSRows, sizeof(int) * dim, cudaMemcpyHostToDevice); cudaMemcpy(deviceJDSData, hostJDSData, sizeof(float) * ndata, cudaMemcpyHostToDevice); cudaMemcpy(deviceVector, hostVector, sizeof(float) * dim, cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); wbTime_start(Compute, "Performing CUDA computation"); spmvJDS(deviceOutput, deviceJDSColStart, 
deviceJDSCols, deviceJDSRowPerm, deviceJDSRows, deviceJDSData, deviceVector, dim); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * dim, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); cudaFree(deviceVector); cudaFree(deviceOutput); cudaFree(deviceJDSColStart); cudaFree(deviceJDSCols); cudaFree(deviceJDSRowPerm); cudaFree(deviceJDSRows); cudaFree(deviceJDSData); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, dim); free(hostCSRCols); free(hostCSRRows); free(hostCSRData); free(hostVector); free(hostOutput); free(hostJDSColStart); free(hostJDSCols); free(hostJDSRowPerm); free(hostJDSRows); free(hostJDSData); return 0; }
eb852e6289aa19b88cd0baeda3b799fc05e3dd69.hip
// !!! This is a file automatically generated by hipify!!!
#include "math_functions.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>

__global__ void compute_hiddenDW_kernel(double *weight_hidden, const double *u, const double *input,
                                        const int nInput, const int nHidden, const int T_size, const int nT,
                                        const int *Time, const double *weight_toutput, const double *direction,
                                        const double *decay1, const double *decay2,
                                        const double threshold, const double V0_param, const double a1)
{
    //determine the id of the thread
    int input_id = blockIdx.x*blockDim.x + threadIdx.x;
    int hidden_id = blockIdx.y;
    if(input_id > nInput - 1) {return;}
    if(hidden_id > nHidden - 1) {return;}

    int t_inputid = input_id*T_size;
    int t_hiddenid = hidden_id*T_size;
    int curT = 0;
    int curt = 1;
    int time_value = Time[curT];
    double Mn = 0;
    double Sn = 0;
    double Upre = 0;
    double Mpre = 0;
    double Spre = 0;
    double Epre = 0;
    double h = 0;
    double tmp = 0;
    double DeltaW = 0;
    // double Mtmp = 0;

    while(curT < nT)
    {
        Epre = decay1[0]*Epre + h*threshold*Upre;
        tmp = input[t_inputid + curt - 1];
        Mpre = decay1[0]*Mpre + tmp;
        Spre = decay2[0]*Spre + tmp;
        Upre = Mpre - Spre - Epre;

        //h function
        h = u[t_hiddenid + curt - 1];
        if(fabs(h-threshold) < (0.5*a1))
        {
            h = 1/a1;
        }
        else
        {
            h = 0;
        }

        tmp = V0_param*h*Upre;
        Mn = decay1[1]*Mn + tmp;
        Sn = decay2[1]*Sn + tmp;
        // if(h != 0)
        // {
        //     Mtmp = h;
        // }

        while(curt == time_value)
        {
            DeltaW = DeltaW + direction[curT]*weight_toutput[curT*nHidden + hidden_id]*(Mn - Sn);
            curT = curT + 1;
            if(curT >= nT)
            {
                break;
            }
            time_value = Time[curT];
        }
        curt = curt + 1;
    }

    weight_hidden[input_id + hidden_id*nInput] = DeltaW;
    // weight_hidden[input_id + hidden_id*nInput] = Mtmp;
}
eb852e6289aa19b88cd0baeda3b799fc05e3dd69.cu
#include "math_functions.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <math.h> __global__ void compute_hiddenDW_kernel( double *weight_hidden, const double *u, const double * input, const int nInput, const int nHidden,const int T_size, const int nT, const int *Time, const double * weight_toutput,const double* direction, const double *decay1, const double *decay2, const double threshold,const double V0_param,const double a1) { //determine the id of the thread int input_id = blockIdx.x*blockDim.x + threadIdx.x; int hidden_id = blockIdx.y; if(input_id > nInput - 1 ) {return;} if(hidden_id > nHidden - 1 ) {return;} int t_inputid = input_id*T_size; int t_hiddenid = hidden_id*T_size; int curT = 0; int curt = 1; int time_value = Time[curT]; double Mn = 0; double Sn = 0; double Upre = 0; double Mpre = 0; double Spre = 0; double Epre = 0; double h = 0; double tmp = 0; double DeltaW = 0; // double Mtmp = 0; while(curT < nT) { Epre = decay1[0]*Epre + h*threshold*Upre; tmp = input[t_inputid + curt - 1]; Mpre = decay1[0]*Mpre + tmp; Spre = decay2[0]*Spre + tmp; Upre = Mpre - Spre -Epre; //h function h = u[t_hiddenid + curt - 1]; if(fabs(h-threshold)<(0.5*a1)) { h = 1/a1; } else { h = 0; } tmp = V0_param*h*Upre; Mn = decay1[1]*Mn + tmp; Sn = decay2[1]*Sn + tmp; // if(h != 0) // { // Mtmp = h; // } while(curt == time_value) { DeltaW = DeltaW + direction[curT]*weight_toutput[curT*nHidden + hidden_id]*(Mn - Sn); curT = curT + 1; if(curT >= nT) { break; } time_value = Time[curT]; } curt = curt + 1; } weight_hidden[input_id + hidden_id*nInput] = DeltaW; // weight_hidden[input_id + hidden_id*nInput] = Mtmp; }
c2c3a6999aeaca9b1ba3c8082c96b81887d1878c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass: checka se o parâmetro é passado com sucesso
//--blockDim=1024 --gridDim=1 --no-inline

#include <stdio.h>
#include <stdlib.h>
#define N 8

__device__ float multiplyByTwo(float *v, unsigned int tid) {
    return v[tid] * 2.0f;
}

__device__ float divideByTwo(float *v, unsigned int tid) {
    return v[tid] * 0.5f;
}

typedef float(*funcType)(float*, unsigned int);

__global__ void foo(float *v, funcType* f, unsigned int size) {
    __requires(f == multiplyByTwo | f == divideByTwo);
    /************************************************************/
    __assert(*f == divideByTwo || *f == multiplyByTwo);
    /************************************************************/

    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (tid < size) {
        v[tid] = (*f)(v, tid);
    }
}
c2c3a6999aeaca9b1ba3c8082c96b81887d1878c.cu
//pass: checka se o parâmetro é passado com sucesso
//--blockDim=1024 --gridDim=1 --no-inline

#include <stdio.h>
#include <stdlib.h>
#define N 8

__device__ float multiplyByTwo(float *v, unsigned int tid) {
    return v[tid] * 2.0f;
}

__device__ float divideByTwo(float *v, unsigned int tid) {
    return v[tid] * 0.5f;
}

typedef float(*funcType)(float*, unsigned int);

__global__ void foo(float *v, funcType* f, unsigned int size) {
    __requires(f == multiplyByTwo | f == divideByTwo);
    /************************************************************/
    __assert(*f == divideByTwo || *f == multiplyByTwo);
    /************************************************************/

    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (tid < size) {
        v[tid] = (*f)(v, tid);
    }
}
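Outside of a verification harness like this test, a kernel that takes a funcType* parameter cannot simply be handed the host address of multiplyByTwo; the function pointer has to live in device memory. A hedged sketch of one common way to obtain it is shown below; the d_op symbol, launchFoo wrapper, and launch geometry are assumptions for illustration, not part of the test case.

// Device-side variable holding a pointer to a __device__ function; its address
// is what gets passed as the kernel's funcType* argument.
__device__ funcType d_op = multiplyByTwo;

void launchFoo(float *d_v, unsigned int n) {
  funcType *d_op_addr = nullptr;
  // cudaGetSymbolAddress yields the device address of the __device__ variable,
  // so the kernel can dereference it and call (*f)(v, tid).
  cudaGetSymbolAddress((void **)&d_op_addr, d_op);
  foo<<<(n + 1023) / 1024, 1024>>>(d_v, d_op_addr, n);
}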
5838917c293cba8e6c89234dfa48c1f33ad468ed.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2015-2019, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <forge.h> #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #define USE_FORGE_CUDA_COPY_HELPERS #include <ComputeCopy.h> #include <cstdio> #include <iostream> const unsigned DIMX = 1000; const unsigned DIMY = 800; static const float ZMIN = 0.1f; static const float ZMAX = 10.f; const float DX = 0.005f; const size_t ZSIZE = (size_t)((ZMAX-ZMIN)/DX+1); void kernel(float t, float dx, float* dev_out); int main(void) { float *dev_out; /* * First Forge call should be a window creation call * so that necessary OpenGL context is created for any * other forge::* object to be created successfully */ forge::Window wnd(DIMX, DIMY, "Three dimensional line plot demo"); wnd.makeCurrent(); forge::Chart chart(FG_CHART_3D); chart.setAxesLabelFormat("%3.1f", "%3.1f", "%.2e"); chart.setAxesLimits(-1.1f, 1.1f, -1.1f, 1.1f, 0.f, 10.f); chart.setAxesTitles("x-axis", "y-axis", "z-axis"); forge::Plot plot3 = chart.plot(ZSIZE, forge::f32); static float t=0; FORGE_CUDA_CHECK(hipMalloc((void**)&dev_out, ZSIZE * 3 * sizeof(float) )); kernel(t, DX, dev_out); GfxHandle* handle; createGLBuffer(&handle, plot3.vertices(), FORGE_VERTEX_BUFFER); /* copy your data into the vertex buffer object exposed by * forge::Plot class and then proceed to rendering. * To help the users with copying the data from compute * memory to display memory, Forge provides copy headers * along with the library to help with this task */ copyToGLBuffer(handle, (ComputeResourceHandle)dev_out, plot3.verticesSize()); do { t+=0.01f; kernel(t, DX, dev_out); copyToGLBuffer(handle, (ComputeResourceHandle)dev_out, plot3.verticesSize()); wnd.draw(chart); } while(!wnd.close()); FORGE_CUDA_CHECK(hipFree(dev_out)); releaseGLBuffer(handle); return 0; } __global__ void generateCurve(float t, float dx, float* out, const float ZMIN, const size_t ZSIZE) { int offset = blockIdx.x * blockDim.x + threadIdx.x; float z = ZMIN + offset*dx; if(offset < ZSIZE) { out[ 3 * offset ] = cos(z*t+t)/z; out[ 3 * offset + 1 ] = sin(z*t+t)/z; out[ 3 * offset + 2 ] = z + 0.1*sin(t); } } inline int divup(int a, int b) { return (a+b-1)/b; } void kernel(float t, float dx, float* dev_out) { static const dim3 threads(1024); dim3 blocks(divup(ZSIZE, 1024)); hipLaunchKernelGGL(( generateCurve), dim3(blocks), dim3(threads) , 0, 0, t, dx, dev_out, ZMIN, ZSIZE); }
5838917c293cba8e6c89234dfa48c1f33ad468ed.cu
/******************************************************* * Copyright (c) 2015-2019, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <forge.h> #include <cuda_runtime.h> #include <cuComplex.h> #define USE_FORGE_CUDA_COPY_HELPERS #include <ComputeCopy.h> #include <cstdio> #include <iostream> const unsigned DIMX = 1000; const unsigned DIMY = 800; static const float ZMIN = 0.1f; static const float ZMAX = 10.f; const float DX = 0.005f; const size_t ZSIZE = (size_t)((ZMAX-ZMIN)/DX+1); void kernel(float t, float dx, float* dev_out); int main(void) { float *dev_out; /* * First Forge call should be a window creation call * so that necessary OpenGL context is created for any * other forge::* object to be created successfully */ forge::Window wnd(DIMX, DIMY, "Three dimensional line plot demo"); wnd.makeCurrent(); forge::Chart chart(FG_CHART_3D); chart.setAxesLabelFormat("%3.1f", "%3.1f", "%.2e"); chart.setAxesLimits(-1.1f, 1.1f, -1.1f, 1.1f, 0.f, 10.f); chart.setAxesTitles("x-axis", "y-axis", "z-axis"); forge::Plot plot3 = chart.plot(ZSIZE, forge::f32); static float t=0; FORGE_CUDA_CHECK(cudaMalloc((void**)&dev_out, ZSIZE * 3 * sizeof(float) )); kernel(t, DX, dev_out); GfxHandle* handle; createGLBuffer(&handle, plot3.vertices(), FORGE_VERTEX_BUFFER); /* copy your data into the vertex buffer object exposed by * forge::Plot class and then proceed to rendering. * To help the users with copying the data from compute * memory to display memory, Forge provides copy headers * along with the library to help with this task */ copyToGLBuffer(handle, (ComputeResourceHandle)dev_out, plot3.verticesSize()); do { t+=0.01f; kernel(t, DX, dev_out); copyToGLBuffer(handle, (ComputeResourceHandle)dev_out, plot3.verticesSize()); wnd.draw(chart); } while(!wnd.close()); FORGE_CUDA_CHECK(cudaFree(dev_out)); releaseGLBuffer(handle); return 0; } __global__ void generateCurve(float t, float dx, float* out, const float ZMIN, const size_t ZSIZE) { int offset = blockIdx.x * blockDim.x + threadIdx.x; float z = ZMIN + offset*dx; if(offset < ZSIZE) { out[ 3 * offset ] = cos(z*t+t)/z; out[ 3 * offset + 1 ] = sin(z*t+t)/z; out[ 3 * offset + 2 ] = z + 0.1*sin(t); } } inline int divup(int a, int b) { return (a+b-1)/b; } void kernel(float t, float dx, float* dev_out) { static const dim3 threads(1024); dim3 blocks(divup(ZSIZE, 1024)); generateCurve<<< blocks, threads >>>(t, dx, dev_out, ZMIN, ZSIZE); }
ee03dc7483f528f6183bb968012275183ff5bc15.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include "support.h" #include "kernel.hip" #include <time.h> int main(int argc, char* argv[]) { Timer timer; time_t t; /* Intializes random number generator */ srand((unsigned) time(&t)); // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *in_h, *out_h; float *in_d, *out_d; unsigned num_elements; hipError_t cuda_ret; /* Allocate and initialize input vector */ if(argc == 1) { num_elements = 1000000; } else if(argc == 2) { num_elements = atoi(argv[1]); } else { printf("\n Invalid input parameters!" "\n Usage: ./prefix-scan # Input of size 1,000,000 is used" "\n Usage: ./prefix-scan <m> # Input of size m is used" "\n"); exit(0); } initVector(&in_h, num_elements); /* Allocate and initialize output vector */ out_h = (float*)calloc(num_elements, sizeof(float)); if(out_h == NULL) FATAL("Unable to allocate host"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf(" Input size = %u\n", num_elements); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); cuda_ret = hipMalloc((void**)&in_d, num_elements*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void**)&out_d, num_elements*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); cuda_ret = hipMemcpy(in_d, in_h, num_elements*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = hipMemset(out_d, 0, num_elements*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to set device memory"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel ---------------------------------------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); preScan(out_d, in_d, num_elements); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cuda_ret = hipMemcpy(out_h, out_d, num_elements*sizeof(float), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(in_h, out_h, num_elements); // Free memory ------------------------------------------------------------ cuda_ret = hipFree(in_d); if(cuda_ret != hipSuccess) FATAL("Unable free CUDA memory"); cuda_ret = hipFree(out_d); if(cuda_ret != hipSuccess) FATAL("Unable free CUDA memory"); free(in_h); 
free(out_h); return 0; }
ee03dc7483f528f6183bb968012275183ff5bc15.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include "support.h" #include "kernel.cu" #include <time.h> int main(int argc, char* argv[]) { Timer timer; time_t t; /* Intializes random number generator */ srand((unsigned) time(&t)); // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *in_h, *out_h; float *in_d, *out_d; unsigned num_elements; cudaError_t cuda_ret; /* Allocate and initialize input vector */ if(argc == 1) { num_elements = 1000000; } else if(argc == 2) { num_elements = atoi(argv[1]); } else { printf("\n Invalid input parameters!" "\n Usage: ./prefix-scan # Input of size 1,000,000 is used" "\n Usage: ./prefix-scan <m> # Input of size m is used" "\n"); exit(0); } initVector(&in_h, num_elements); /* Allocate and initialize output vector */ out_h = (float*)calloc(num_elements, sizeof(float)); if(out_h == NULL) FATAL("Unable to allocate host"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf(" Input size = %u\n", num_elements); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMalloc((void**)&in_d, num_elements*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void**)&out_d, num_elements*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMemcpy(in_d, in_h, num_elements*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device"); cuda_ret = cudaMemset(out_d, 0, num_elements*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to set device memory"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel ---------------------------------------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); preScan(out_d, in_d, num_elements); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMemcpy(out_h, out_d, num_elements*sizeof(float), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(in_h, out_h, num_elements); // Free memory ------------------------------------------------------------ cuda_ret = cudaFree(in_d); if(cuda_ret != cudaSuccess) FATAL("Unable free CUDA memory"); cuda_ret = cudaFree(out_d); if(cuda_ret != cudaSuccess) FATAL("Unable free CUDA memory"); free(in_h); free(out_h); return 0; }
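The preScan routine the driver above calls lives in kernel.cu, which is not part of this listing. As a rough illustration of the kind of kernel such a driver expects, here is a minimal single-block inclusive scan; it is a sketch only and does not handle the multi-block pass that one million elements would require.

#define SCAN_BLOCK 512
// Minimal Hillis-Steele inclusive scan over at most SCAN_BLOCK elements.
// A real preScan must additionally scan the per-block sums and add them back.
__global__ void scanBlockInclusive(const float *in, float *out, unsigned n) {
  __shared__ float buf[SCAN_BLOCK];
  unsigned t = threadIdx.x;
  buf[t] = (t < n) ? in[t] : 0.0f;
  __syncthreads();
  for (unsigned stride = 1; stride < blockDim.x; stride <<= 1) {
    float v = (t >= stride) ? buf[t - stride] : 0.0f; // read before anyone writes
    __syncthreads();
    buf[t] += v;
    __syncthreads();
  }
  if (t < n) out[t] = buf[t];
}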
6fc841414f43e48b39466d57d1a6edb917d2ad0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <system/op_boilerplate.h> #include <loops/reduce_bool.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> #include <types/types.h> #include <execution/LaunchContext.h> #include <exceptions/cuda_exception.h> #include <loops/scalar.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleReduce(const void *x, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *zShapeInfo) { functions::reduce::ReduceBoolFunction<X,Z>::template transformCudaXD<OpType>(x, outerXTadShapeInfo, innerXTadShapeInfo, vreductionBuffer, extraParams, z, zShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleScalar(const void *x, const Nd4jLong *xShapeInfo, void *extraParams, void *z, const Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { functions::reduce::ReduceBoolFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceBoolFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto sPartials = reinterpret_cast<Z*>(vsPartials); auto extraParams = reinterpret_cast<X*>(vextraParams); Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceBoolFunction<X,Z>::transformCudaXD(const void *vx, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo, void *vextraParams, void *vreductionBuffer, void *vz, const Nd4jLong *zShapeInfo) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); //shared memory space for storing intermediate results __shared__ Z sPartials[CUDA_BLOCK_SIZE]; __shared__ int tadLen, numTads; __shared__ bool sameOffsets; if (threadIdx.x == 0) { sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo); tadLen = shape::length(innerXTadShapeInfo); numTads = shape::length(outerXTadShapeInfo); } __syncthreads(); int coords[MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { shape::index2coords(r, outerXTadShapeInfo, coords); const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords); const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords); const X* xTad = x + outerOffset; sPartials[threadIdx.x] = OpType::startingValue(xTad); for (int i = threadIdx.x; i < tadLen; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams); __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLen aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLen), extraParams); __syncthreads(); if (threadIdx.x == 0) z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceBoolFunction<X,Z>::execScalarCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vextraParams, void *vz, const Nd4jLong *zShapeInfo, void *vreductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results __shared__ Z sPartials[CUDA_BLOCK_SIZE]; __shared__ Nd4jLong xEws; __shared__ Nd4jLong len; if(threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } else { if (threadIdx.x == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceBoolFunction<X,Z>::intermediateXD(dim3 launchDims, hipStream_t *stream, const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int* dims) { if(shape::isEmpty(hXShapeInfo)) { if(shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x))); auto res = hipMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream); if (res != 0) throw 
sd::cuda_exception::build("ReduceBoolFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res); auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer(); // scalar assign functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hZShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr); sd::DebugHelper::checkErrorCode(stream, "reduceBoolDim empty(...) failed"); } else { const int zRank = shape::rank(hZShapeInfo); const int tadRank = shape::rank(hXShapeInfo) - zRank; auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank); auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims+zRank, tadRank); hipLaunchKernelGGL(( simpleReduce<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, reinterpret_cast<Nd4jLong const*>(outerPack.special()), reinterpret_cast<Nd4jLong const*>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo); sd::DebugHelper::checkErrorCode(stream, "reduceBoolDim(...) failed"); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceBoolFunction<X,Z>::intermediateScalar(dim3 launchDims, hipStream_t *stream, const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *z, const Nd4jLong *zShapeInfo, const Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x))); auto res = hipMemcpyAsync(z, &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceBoolFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res); sd::DebugHelper::checkErrorCode(stream, "reduceBoolScalar empty(...) failed"); } else { hipLaunchKernelGGL(( simpleScalar<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); sd::DebugHelper::checkErrorCode(stream, "reduceBoolScalar(...) failed"); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceBoolFunction<X,Y>::execReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *z, const Nd4jLong *zShapeInfo, const Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_BOOL_OPS)); sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) 
failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceBoolFunction<X,Y>::execReduceXD(dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int *dims) { if(shape::length(hZShapeInfo) == 1) { ReduceBoolFunction<X,Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr); } else { DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), OPS_A(REDUCE_BOOL_OPS)); } DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *) *sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceBoolFunction, , LIBND4J_TYPES, BOOL_TYPES); } }
6fc841414f43e48b39466d57d1a6edb917d2ad0e.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <system/op_boilerplate.h> #include <loops/reduce_bool.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> #include <types/types.h> #include <execution/LaunchContext.h> #include <exceptions/cuda_exception.h> #include <loops/scalar.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleReduce(const void *x, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *zShapeInfo) { functions::reduce::ReduceBoolFunction<X,Z>::template transformCudaXD<OpType>(x, outerXTadShapeInfo, innerXTadShapeInfo, vreductionBuffer, extraParams, z, zShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleScalar(const void *x, const Nd4jLong *xShapeInfo, void *extraParams, void *z, const Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { functions::reduce::ReduceBoolFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceBoolFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto sPartials = reinterpret_cast<Z*>(vsPartials); auto extraParams = reinterpret_cast<X*>(vextraParams); Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceBoolFunction<X,Z>::transformCudaXD(const void *vx, const Nd4jLong *outerXTadShapeInfo, const Nd4jLong *innerXTadShapeInfo, void *vextraParams, void *vreductionBuffer, void *vz, const Nd4jLong *zShapeInfo) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); //shared memory space for storing intermediate results __shared__ Z sPartials[CUDA_BLOCK_SIZE]; __shared__ int tadLen, numTads; __shared__ bool sameOffsets; if (threadIdx.x == 0) { sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo); tadLen = shape::length(innerXTadShapeInfo); numTads = shape::length(outerXTadShapeInfo); } __syncthreads(); int coords[MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { shape::index2coords(r, outerXTadShapeInfo, coords); const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords); const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords); const X* xTad = x + outerOffset; sPartials[threadIdx.x] = OpType::startingValue(xTad); for (int i = threadIdx.x; i < tadLen; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams); __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLen aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLen), extraParams); __syncthreads(); if (threadIdx.x == 0) z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceBoolFunction<X,Z>::execScalarCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vextraParams, void *vz, const Nd4jLong *zShapeInfo, void *vreductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { auto x = reinterpret_cast<const X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results __shared__ Z sPartials[CUDA_BLOCK_SIZE]; __shared__ Nd4jLong xEws; __shared__ Nd4jLong len; if(threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } else { if (threadIdx.x == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceBoolFunction<X,Z>::intermediateXD(dim3 launchDims, cudaStream_t *stream, const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int* dims) { if(shape::isEmpty(hXShapeInfo)) { if(shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x))); auto res = cudaMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream); if (res != 0) throw 
sd::cuda_exception::build("ReduceBoolFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res); auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer(); // scalar assign functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hZShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr); sd::DebugHelper::checkErrorCode(stream, "reduceBoolDim empty(...) failed"); } else { const int zRank = shape::rank(hZShapeInfo); const int tadRank = shape::rank(hXShapeInfo) - zRank; auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank); auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims+zRank, tadRank); simpleReduce<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, reinterpret_cast<Nd4jLong const*>(outerPack.special()), reinterpret_cast<Nd4jLong const*>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo); sd::DebugHelper::checkErrorCode(stream, "reduceBoolDim(...) failed"); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceBoolFunction<X,Z>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *z, const Nd4jLong *zShapeInfo, const Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X*>(x))); auto res = cudaMemcpyAsync(z, &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceBoolFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res); sd::DebugHelper::checkErrorCode(stream, "reduceBoolScalar empty(...) failed"); } else { simpleScalar<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); sd::DebugHelper::checkErrorCode(stream, "reduceBoolScalar(...) failed"); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceBoolFunction<X,Y>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const Nd4jLong *xShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *z, const Nd4jLong *zShapeInfo, const Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_BOOL_OPS)); sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) 
failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceBoolFunction<X,Y>::execReduceXD(dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const Nd4jLong *dXShapeInfo, const Nd4jLong *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const Nd4jLong *dZShapeInfo, const Nd4jLong *hZShapeInfo, const int *dims) { if(shape::length(hZShapeInfo) == 1) { ReduceBoolFunction<X,Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr); } else { DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), OPS_A(REDUCE_BOOL_OPS)); } DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *) *sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceBoolFunction, , LIBND4J_TYPES, BOOL_TYPES); } }
8e57d3823af89eaede21f1fdff6b5830bcb491d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #define CHECK_CUDA_ERROR(exp) { \ hipError_t ret = (exp); \ if (ret != hipSuccess) { \ fprintf(stderr, "[error] %s:%d: %s (%s)\n", \ __FILE__, __LINE__, \ hipGetErrorName(ret), \ hipGetErrorString(ret)); \ exit(EXIT_FAILURE); \ } \ } // a kernel that multiplies a vector y with a scalar alpha __global__ void ax_kernel(int n, double alpha, double *y) { // // Each thread is going to begin from the array element that matches it's // global index number. For blockDim.x = 4, gridDim.x 2, we have: // threadIdx.x : 0 1 2 3 0 1 2 3 // blockIdx.x : 0 0 0 0 1 1 1 1 // blockDim.x : 4 4 4 4 4 4 4 4 // thread_id : 0 1 2 3,4 5 6 7 // int thread_id = blockIdx.x * blockDim.x + threadIdx.x; int thread_count = gridDim.x * blockDim.x; // // Each thread is going to jump over <grid dimension> * <block dimension> // array elements. For blockDim.x = 4, gridDim.x 2, we have: // 0 1 2 3,4 5 6 7|0 1 2 3,4 5 6 7|0 1 2 3,4 5 6 7|0 1 2 3,4 5 6 7|0 ... // for (int i = thread_id; i < n; i += thread_count) y[i] = alpha * y[i]; } int main(int argc, char const **argv) { double alpha = 2.0; // read and validate the command line arguments if (argc < 2) { fprintf(stderr, "[error] No vector length was supplied.\n"); return EXIT_FAILURE; } int n = atof(argv[1]); if (n < 1) { fprintf(stderr, "[error] The vector length was invalid.\n"); return EXIT_FAILURE; } srand(time(NULL)); // allocate host memory for the vector and it's duplicate double *y, *_y; if ((y = (double *) malloc(n*sizeof(double))) == NULL) { fprintf(stderr, "[error] Failed to allocate host memory for vector y.\n"); return EXIT_FAILURE; } if ((_y = (double *) malloc(n*sizeof(double))) == NULL) { fprintf(stderr, "[error] Failed to allocate host memory for vector _y.\n"); return EXIT_FAILURE; } // initialize host memory and store a copy for a later validation for (int i = 0; i < n; i++) y[i] = _y[i] = 1.0*rand()/RAND_MAX; // allocate device memory double *d_y; CHECK_CUDA_ERROR(hipMalloc(&d_y, n*sizeof(double))); // copy the vector from the host memory to the device memory CHECK_CUDA_ERROR( hipMemcpy(d_y, y, n*sizeof(double), hipMemcpyHostToDevice)); // start timer struct timespec ts_start; clock_gettime(CLOCK_MONOTONIC, &ts_start); // launch the kernel dim3 threads = 256; dim3 blocks = max(1, min(256, n/threads.x)); hipLaunchKernelGGL(( ax_kernel), dim3(blocks), dim3(threads), 0, 0, n, alpha, d_y); // wait until the device is ready and stop the timer CHECK_CUDA_ERROR(hipDeviceSynchronize()); struct timespec ts_stop; clock_gettime(CLOCK_MONOTONIC, &ts_stop); // calculate metrics double time = ts_stop.tv_sec - ts_start.tv_sec + 1.0e-9*(ts_stop.tv_nsec - ts_start.tv_nsec); printf("Time = %f s\n", time); printf("Floprate = %.1f GFlops\n", 1.0E-9 * n / time); printf("Memory throughput = %.0f GB/s\n", 1.0E-9 * 2 * n * sizeof(double) / time); // free the allocated memory free(y); free(_y); CHECK_CUDA_ERROR(hipFree(d_y)); }
8e57d3823af89eaede21f1fdff6b5830bcb491d6.cu
#include <stdlib.h> #include <stdio.h> #define CHECK_CUDA_ERROR(exp) { \ cudaError_t ret = (exp); \ if (ret != cudaSuccess) { \ fprintf(stderr, "[error] %s:%d: %s (%s)\n", \ __FILE__, __LINE__, \ cudaGetErrorName(ret), \ cudaGetErrorString(ret)); \ exit(EXIT_FAILURE); \ } \ } // a kernel that multiplies a vector y with a scalar alpha __global__ void ax_kernel(int n, double alpha, double *y) { // // Each thread is going to begin from the array element that matches it's // global index number. For blockDim.x = 4, gridDim.x 2, we have: // threadIdx.x : 0 1 2 3 0 1 2 3 // blockIdx.x : 0 0 0 0 1 1 1 1 // blockDim.x : 4 4 4 4 4 4 4 4 // thread_id : 0 1 2 3,4 5 6 7 // int thread_id = blockIdx.x * blockDim.x + threadIdx.x; int thread_count = gridDim.x * blockDim.x; // // Each thread is going to jump over <grid dimension> * <block dimension> // array elements. For blockDim.x = 4, gridDim.x 2, we have: // 0 1 2 3,4 5 6 7|0 1 2 3,4 5 6 7|0 1 2 3,4 5 6 7|0 1 2 3,4 5 6 7|0 ... // for (int i = thread_id; i < n; i += thread_count) y[i] = alpha * y[i]; } int main(int argc, char const **argv) { double alpha = 2.0; // read and validate the command line arguments if (argc < 2) { fprintf(stderr, "[error] No vector length was supplied.\n"); return EXIT_FAILURE; } int n = atof(argv[1]); if (n < 1) { fprintf(stderr, "[error] The vector length was invalid.\n"); return EXIT_FAILURE; } srand(time(NULL)); // allocate host memory for the vector and it's duplicate double *y, *_y; if ((y = (double *) malloc(n*sizeof(double))) == NULL) { fprintf(stderr, "[error] Failed to allocate host memory for vector y.\n"); return EXIT_FAILURE; } if ((_y = (double *) malloc(n*sizeof(double))) == NULL) { fprintf(stderr, "[error] Failed to allocate host memory for vector _y.\n"); return EXIT_FAILURE; } // initialize host memory and store a copy for a later validation for (int i = 0; i < n; i++) y[i] = _y[i] = 1.0*rand()/RAND_MAX; // allocate device memory double *d_y; CHECK_CUDA_ERROR(cudaMalloc(&d_y, n*sizeof(double))); // copy the vector from the host memory to the device memory CHECK_CUDA_ERROR( cudaMemcpy(d_y, y, n*sizeof(double), cudaMemcpyHostToDevice)); // start timer struct timespec ts_start; clock_gettime(CLOCK_MONOTONIC, &ts_start); // launch the kernel dim3 threads = 256; dim3 blocks = max(1, min(256, n/threads.x)); ax_kernel<<<blocks, threads>>>(n, alpha, d_y); // wait until the device is ready and stop the timer CHECK_CUDA_ERROR(cudaDeviceSynchronize()); struct timespec ts_stop; clock_gettime(CLOCK_MONOTONIC, &ts_stop); // calculate metrics double time = ts_stop.tv_sec - ts_start.tv_sec + 1.0e-9*(ts_stop.tv_nsec - ts_start.tv_nsec); printf("Time = %f s\n", time); printf("Floprate = %.1f GFlops\n", 1.0E-9 * n / time); printf("Memory throughput = %.0f GB/s\n", 1.0E-9 * 2 * n * sizeof(double) / time); // free the allocated memory free(y); free(_y); CHECK_CUDA_ERROR(cudaFree(d_y)); }
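The host above keeps an untouched copy _y "for a later validation", but the check itself is not in the file. A plausible ending, written as an assumption rather than the original author's code, would copy the result back and compare element-wise (fabs requires <math.h>):

// Hypothetical validation step using the saved copy _y (not in the original file).
CHECK_CUDA_ERROR(cudaMemcpy(y, d_y, n * sizeof(double), cudaMemcpyDeviceToHost));
int mismatches = 0;
for (int i = 0; i < n; i++)
    if (fabs(y[i] - alpha * _y[i]) > 1e-12 * fabs(alpha * _y[i]))
        mismatches++;
printf("Validation: %d mismatches out of %d elements\n", mismatches, n);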
531547a1f4baedf72323b8a3d83c5d86693f3c85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> //#include <cutil.h> #include "coefficient.h" #define THREAD_NUM 8 #define BLOCK_NUM 1 #define ARRAY_LENGTH PRICE_LIST_SIZE #define BLOCK_SIZE 8 const int threadsPerBlock = 256; static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE ); }} //__global__ cuda_function_test(){ // cuprintf("call cuda function"); //} #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // handle the data at this index if (tid < N) c[tid] = a[tid] + b[tid]; } __global__ void dot(float* input1, float* input2, float * output) { //@@ Load a segment of the input vector into shared memory __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<ARRAY_LENGTH){ temp += input1[tid]*input2[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0){ if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+1]; __syncthreads(); i /= 2; } if(cacheIndex == 0) output[blockIdx.x] = cache[0]; return; } __global__ void sumSquare(float * input, float * output) { //@@ Load a segment of the input vector into shared memory __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<ARRAY_LENGTH){ temp += input[tid]*input[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0){ if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+1]; __syncthreads(); i /= 2; } if(cacheIndex == 0) output[blockIdx.x] = cache[0]; return; } __global__ void sum(float * input, float * output) { //@@ Load a segment of the input vector into shared memory __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<ARRAY_LENGTH){ temp += input[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0){ if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+1]; __syncthreads(); i /= 2; } if(cacheIndex == 0) output[blockIdx.x] = cache[0]; return; } //fenzi = sum(X .* Y) - (sum(X) * sum(Y)) / length(X); //fenmu = sqrt((sum(X .^2) - sum(X)^2 / length(X)) * (sum(Y .^2) - sum(Y)^2 / length(X))); //coeff = fenzi / fenmu; extern "C" float coefficient_single(float* a, float* b){ float* dev_a = a; float* dev_b = b; // hipMalloc(&dev_a, sizeof(float) * ARRAY_LENGTH); // hipMalloc(&dev_b, sizeof(float) * ARRAY_LENGTH); // hipMemcpy(dev_a, a, sizeof(float) * ARRAY_LENGTH, hipMemcpyHostToDevice); // hipMemcpy(dev_b, b, sizeof(float) * ARRAY_LENGTH, hipMemcpyHostToDevice); float* dev_o; float* host_o = (float*) malloc(BLOCK_NUM* sizeof(float)); hipMalloc(&dev_o, sizeof(float) * BLOCK_NUM); // do following // dot x.*y hipLaunchKernelGGL(( dot) , dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, dev_a, dev_b, dev_o); hipMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, hipMemcpyDeviceToHost); float dot = 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { dot += host_o[ii]; } //cout << "dot " 
<< dot << endl; // sum x hipLaunchKernelGGL(( sum), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, dev_a, dev_o); hipMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, hipMemcpyDeviceToHost); float sum_x= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_x += host_o[ii]; } //cout << "sum_x" << sum_x << endl; // sum y hipLaunchKernelGGL(( sum), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, dev_b, dev_o); hipMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, hipMemcpyDeviceToHost); float sum_y= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_y += host_o[ii]; } //cout << "sum_y" << sum_y << endl; // sum y.*2 hipLaunchKernelGGL(( sum), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, dev_b, dev_o); hipMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, hipMemcpyDeviceToHost); float sum_y_2= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_y_2 += host_o[ii]; } //cout << "sum_y_2" << sum_y_2 << endl; // sum x.*2 hipLaunchKernelGGL(( sum), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, dev_a, dev_o); hipMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, hipMemcpyDeviceToHost); float sum_x_2= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_x_2 += host_o[ii]; } //cout << "sum_x_2" << sum_x_2 << endl; //fenmu = sqrt((sum(X .^2) - sum(X)^2 / length(X)) * (sum(Y .^2) - sum(Y)^2 / length(X))); float fenzi = (dot-sum_x*sum_y)/ARRAY_LENGTH; float fenmu = sqrt((sum_x_2-sum_x*sum_x/ARRAY_LENGTH)*(sum_y_2-sum_y*sum_y/ARRAY_LENGTH)); // hipFree(dev_a); // hipFree(dev_b); hipFree(dev_o); free(host_o); return fenzi/fenmu; } extern "C" void coefficient(stockPriceMap* data){ int loop = SERVER_STOCK; float* array[loop]; for(int i=0; i<loop; ++i){ float a_c[ARRAY_LENGTH]; const deque<int>& q_a = (*data)[i]; for(int c=0; c<ARRAY_LENGTH; ++c){ a_c[c]=q_a[c]; } hipMalloc(&array[i], sizeof(float) * ARRAY_LENGTH); hipMemcpy(array[i], a_c, sizeof(float) * ARRAY_LENGTH, hipMemcpyHostToDevice); } cout << "start coefficient calculate" << endl; float min_cof = 1; int stock_x, stock_y; for(int i=0; i<loop; ++i){ for(int j=i+1; j<loop; ++j){ float val = coefficient_single(array[i], array[j]); val = fabs(val); if((val)<min_cof){ stock_x = i; stock_y = j; min_cof = (val); } } } cout << "Min coefficient factor:" << min_cof << " with stock:" << stock_x << " and stock:" << stock_y << endl; for(int i=0; i<loop; ++i){ hipFree(array[loop]); } abort(); //stockPriceMap::const_iterator s = data->begin(), e = data->end(); //for(; s!=e; ++s){ // cout <<"stock id:" << s->first << " " << s->second.size() << endl; //} } extern "C" void cuda_test(){ /// int deviceCount; /// CUDA_SAFE_CALL(hipGetDeviceCount(&deviceCount)); /// if(deviceCount == 0) /// printf("There is no device support CUDA(GPU)"); /// int dev; /// for(dev=0; dev<deviceCount; ++dev){ /// /// } int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_c, N * sizeof(int) ) ); // fill the arrays 'a' and 'b' on the CPU for (int i=0; i<N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR( hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice ) ); add << <N, 1 >> >( dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU HANDLE_ERROR( hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost ) ); // display the results for (int i=0; i<N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } 
// free the memory allocated on the GPU hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_c ); }
531547a1f4baedf72323b8a3d83c5d86693f3c85.cu
#include <stdio.h> //#include <cutil.h> #include "coefficient.h" #define THREAD_NUM 8 #define BLOCK_NUM 1 #define ARRAY_LENGTH PRICE_LIST_SIZE #define BLOCK_SIZE 8 const int threadsPerBlock = 256; static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE ); }} //__global__ cuda_function_test(){ // cuprintf("call cuda function"); //} #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // handle the data at this index if (tid < N) c[tid] = a[tid] + b[tid]; } __global__ void dot(float* input1, float* input2, float * output) { //@@ Load a segment of the input vector into shared memory __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<ARRAY_LENGTH){ temp += input1[tid]*input2[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0){ if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+1]; __syncthreads(); i /= 2; } if(cacheIndex == 0) output[blockIdx.x] = cache[0]; return; } __global__ void sumSquare(float * input, float * output) { //@@ Load a segment of the input vector into shared memory __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<ARRAY_LENGTH){ temp += input[tid]*input[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0){ if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+1]; __syncthreads(); i /= 2; } if(cacheIndex == 0) output[blockIdx.x] = cache[0]; return; } __global__ void sum(float * input, float * output) { //@@ Load a segment of the input vector into shared memory __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while(tid<ARRAY_LENGTH){ temp += input[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x/2; while(i!=0){ if(cacheIndex<i) cache[cacheIndex] += cache[cacheIndex+1]; __syncthreads(); i /= 2; } if(cacheIndex == 0) output[blockIdx.x] = cache[0]; return; } //fenzi = sum(X .* Y) - (sum(X) * sum(Y)) / length(X); //fenmu = sqrt((sum(X .^2) - sum(X)^2 / length(X)) * (sum(Y .^2) - sum(Y)^2 / length(X))); //coeff = fenzi / fenmu; extern "C" float coefficient_single(float* a, float* b){ float* dev_a = a; float* dev_b = b; // cudaMalloc(&dev_a, sizeof(float) * ARRAY_LENGTH); // cudaMalloc(&dev_b, sizeof(float) * ARRAY_LENGTH); // cudaMemcpy(dev_a, a, sizeof(float) * ARRAY_LENGTH, cudaMemcpyHostToDevice); // cudaMemcpy(dev_b, b, sizeof(float) * ARRAY_LENGTH, cudaMemcpyHostToDevice); float* dev_o; float* host_o = (float*) malloc(BLOCK_NUM* sizeof(float)); cudaMalloc(&dev_o, sizeof(float) * BLOCK_NUM); // do following // dot x.*y dot <<<BLOCK_NUM, THREAD_NUM>>>(dev_a, dev_b, dev_o); cudaMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, cudaMemcpyDeviceToHost); float dot = 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { dot += host_o[ii]; } //cout << "dot " << dot << endl; // sum x sum<<<BLOCK_NUM, THREAD_NUM>>>(dev_a, dev_o); cudaMemcpy(host_o, dev_o, sizeof(float) * 
BLOCK_NUM, cudaMemcpyDeviceToHost); float sum_x= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_x += host_o[ii]; } //cout << "sum_x" << sum_x << endl; // sum y sum<<<BLOCK_NUM, THREAD_NUM>>>(dev_b, dev_o); cudaMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, cudaMemcpyDeviceToHost); float sum_y= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_y += host_o[ii]; } //cout << "sum_y" << sum_y << endl; // sum y.*2 sum<<<BLOCK_NUM, THREAD_NUM>>>(dev_b, dev_o); cudaMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, cudaMemcpyDeviceToHost); float sum_y_2= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_y_2 += host_o[ii]; } //cout << "sum_y_2" << sum_y_2 << endl; // sum x.*2 sum<<<BLOCK_NUM, THREAD_NUM>>>(dev_a, dev_o); cudaMemcpy(host_o, dev_o, sizeof(float) * BLOCK_NUM, cudaMemcpyDeviceToHost); float sum_x_2= 0; for (int ii = 0; ii < BLOCK_NUM; ii++) { sum_x_2 += host_o[ii]; } //cout << "sum_x_2" << sum_x_2 << endl; //fenmu = sqrt((sum(X .^2) - sum(X)^2 / length(X)) * (sum(Y .^2) - sum(Y)^2 / length(X))); float fenzi = (dot-sum_x*sum_y)/ARRAY_LENGTH; float fenmu = sqrt((sum_x_2-sum_x*sum_x/ARRAY_LENGTH)*(sum_y_2-sum_y*sum_y/ARRAY_LENGTH)); // cudaFree(dev_a); // cudaFree(dev_b); cudaFree(dev_o); free(host_o); return fenzi/fenmu; } extern "C" void coefficient(stockPriceMap* data){ int loop = SERVER_STOCK; float* array[loop]; for(int i=0; i<loop; ++i){ float a_c[ARRAY_LENGTH]; const deque<int>& q_a = (*data)[i]; for(int c=0; c<ARRAY_LENGTH; ++c){ a_c[c]=q_a[c]; } cudaMalloc(&array[i], sizeof(float) * ARRAY_LENGTH); cudaMemcpy(array[i], a_c, sizeof(float) * ARRAY_LENGTH, cudaMemcpyHostToDevice); } cout << "start coefficient calculate" << endl; float min_cof = 1; int stock_x, stock_y; for(int i=0; i<loop; ++i){ for(int j=i+1; j<loop; ++j){ float val = coefficient_single(array[i], array[j]); val = fabs(val); if((val)<min_cof){ stock_x = i; stock_y = j; min_cof = (val); } } } cout << "Min coefficient factor:" << min_cof << " with stock:" << stock_x << " and stock:" << stock_y << endl; for(int i=0; i<loop; ++i){ cudaFree(array[loop]); } abort(); //stockPriceMap::const_iterator s = data->begin(), e = data->end(); //for(; s!=e; ++s){ // cout <<"stock id:" << s->first << " " << s->second.size() << endl; //} } extern "C" void cuda_test(){ /// int deviceCount; /// CUDA_SAFE_CALL(cudaGetDeviceCount(&deviceCount)); /// if(deviceCount == 0) /// printf("There is no device support CUDA(GPU)"); /// int dev; /// for(dev=0; dev<deviceCount; ++dev){ /// /// } int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) ); // fill the arrays 'a' and 'b' on the CPU for (int i=0; i<N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ) ); add << <N, 1 >> >( dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ) ); // display the results for (int i=0; i<N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); }
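The reduction kernels in the file above (dot, sumSquare, sum) follow the usual grid-stride accumulation plus shared-memory tree reduction, but each halving step adds cache[cacheIndex+1] instead of the element i slots away, so partial sums beyond the immediate neighbour are never folded in. The sketch below is my own code, not part of the file, and the kernel name is made up; it shows the canonical pattern, and swapping the accumulation line for temp += x[tid]*y[tid] or temp += x[tid]*x[tid] gives the dot-product and sum-of-squares variants the Pearson correlation needs.

#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 256   // launch with this many threads per block (a power of two)

// One partial sum per block; the host adds the per-block results afterwards.
__global__ void blockSum(const float* input, float* blockResults, int n)
{
    __shared__ float cache[THREADS_PER_BLOCK];

    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float temp = 0.0f;

    // Grid-stride accumulation into a per-thread partial sum
    while (tid < n) {
        temp += input[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();

    // Tree reduction: the stride i halves every iteration,
    // and each active thread adds the element i slots away.
    for (int i = blockDim.x / 2; i > 0; i /= 2) {
        if (threadIdx.x < i)
            cache[threadIdx.x] += cache[threadIdx.x + i];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        blockResults[blockIdx.x] = cache[0];
}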
42bddec94f25104283695c18ac6cbc61a685ed42.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// The translation unit for reduction `any`
#include <cudf/detail/reduction_functions.hpp>
#include "simple_hip.cuh"

std::unique_ptr<cudf::scalar> cudf::experimental::reduction::any(
    column_view const& col, cudf::data_type const output_dtype,
    rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
    CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::BOOL8),
        "any() operation can be applied with output type `bool8` only");

    return cudf::experimental::reduction::max(col, cudf::data_type(cudf::BOOL8), mr, stream);
}
42bddec94f25104283695c18ac6cbc61a685ed42.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// The translation unit for reduction `any`
#include <cudf/detail/reduction_functions.hpp>
#include "simple.cuh"

std::unique_ptr<cudf::scalar> cudf::experimental::reduction::any(
    column_view const& col, cudf::data_type const output_dtype,
    rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
    CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::BOOL8),
        "any() operation can be applied with output type `bool8` only");

    return cudf::experimental::reduction::max(col, cudf::data_type(cudf::BOOL8), mr, stream);
}
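The pair above implements cudf's any() reduction by delegating to the max reduction on a BOOL8 column: the maximum over a set of booleans is true exactly when at least one element is true, so no dedicated kernel is needed. The snippet below only illustrates that equivalence with Thrust; the function name is mine and this is not cudf's internal code path.

#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>

// "any" expressed as a max-reduction over booleans.
bool any_via_max(const thrust::device_vector<bool>& col)
{
    // max(false, ..., x, ...) is true exactly when some element is true
    return thrust::reduce(col.begin(), col.end(), false, thrust::maximum<bool>());
}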
64402121d10ce882c574efa501f49c72d31bc56e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void devInverseReindexInt3Bool(int N, int3 *destArray, int3 *srcArray, unsigned int *reindex, int realSize, int nDims, int maxValue, bool ignoreValue)
{
    for (unsigned int n = 0; n < nDims; n++) {
        int i = blockIdx.x*blockDim.x + threadIdx.x;
        while (i < N) {
            int ret = -1;
            int tmp = srcArray[i + n*realSize].x;
            if (tmp != -1 || ignoreValue == false) {
                int addValue = 0;
                while (tmp >= maxValue) {
                    tmp -= maxValue;
                    addValue += maxValue;
                }
                while (tmp < 0) {
                    tmp += maxValue;
                    addValue -= maxValue;
                }
                ret = (int) reindex[tmp] + addValue;
            }
            destArray[i + n*realSize].x = ret;

            ret = -1;
            tmp = srcArray[i + n*realSize].y;
            if (tmp != -1 || ignoreValue == false) {
                int addValue = 0;
                while (tmp >= maxValue) {
                    tmp -= maxValue;
                    addValue += maxValue;
                }
                while (tmp < 0) {
                    tmp += maxValue;
                    addValue -= maxValue;
                }
                ret = (int) reindex[tmp] + addValue;
            }
            destArray[i + n*realSize].y = ret;

            ret = -1;
            tmp = srcArray[i + n*realSize].z;
            if (tmp != -1 || ignoreValue == false) {
                int addValue = 0;
                while (tmp >= maxValue) {
                    tmp -= maxValue;
                    addValue += maxValue;
                }
                while (tmp < 0) {
                    tmp += maxValue;
                    addValue -= maxValue;
                }
                ret = (int) reindex[tmp] + addValue;
            }
            destArray[i + n*realSize].z = ret;

            i += gridDim.x*blockDim.x;
        }
    }
}
64402121d10ce882c574efa501f49c72d31bc56e.cu
#include "includes.h" __global__ void devInverseReindexInt3Bool(int N, int3 *destArray, int3 *srcArray, unsigned int *reindex, int realSize, int nDims, int maxValue, bool ignoreValue) { for (unsigned int n = 0; n < nDims; n++) { int i = blockIdx.x*blockDim.x + threadIdx.x; while (i < N) { int ret = -1; int tmp = srcArray[i + n*realSize].x; if (tmp != -1 || ignoreValue == false) { int addValue = 0; while (tmp >= maxValue) { tmp -= maxValue; addValue += maxValue; } while (tmp < 0) { tmp += maxValue; addValue -= maxValue; } ret = (int) reindex[tmp] + addValue; } destArray[i + n*realSize].x = ret; ret = -1; tmp = srcArray[i + n*realSize].y; if (tmp != -1 || ignoreValue == false) { int addValue = 0; while (tmp >= maxValue) { tmp -= maxValue; addValue += maxValue; } while (tmp < 0) { tmp += maxValue; addValue -= maxValue; } ret = (int) reindex[tmp] + addValue; } destArray[i + n*realSize].y = ret; ret = -1; tmp = srcArray[i + n*realSize].z; if (tmp != -1 || ignoreValue == false) { int addValue = 0; while (tmp >= maxValue) { tmp -= maxValue; addValue += maxValue; } while (tmp < 0) { tmp += maxValue; addValue -= maxValue; } ret = (int) reindex[tmp] + addValue; } destArray[i + n*realSize].z = ret; i += gridDim.x*blockDim.x; } } }
eeb880cc5f2c7be756bb2eaf3d7926c25602bbf0.hip
// !!! This is a file automatically generated by hipify!!! /* * solver.cu * * Created on: Apr 27, 2014 * Author: mark */ #include "hip/hip_runtime.h" #include <climits> #include "solver.h" #include "pathfinder_common.h" #include "scene.h" __device__ void addToList(nodeList_t *list, node_t *node) { node_t *cur = list->head; list->size++; if (cur) { while (cur->next) cur = cur->next; cur->next = node; } else list->head = node; } __device__ node_t *removeCheapest(nodeList_t *list) { // If the list is empty, return if (list->size == 0) return NULL; // Track the cheapest node node_t *cheapest = list->head; node_t *prevToCheap = NULL; // Iteration variables node_t *curNode = cheapest->next; node_t *prevNode = cheapest; while (curNode) { if (curNode->f < cheapest->f) { cheapest = curNode; prevToCheap = prevNode; } prevNode = curNode; curNode = curNode->next; } // Remove the cheapest node if (prevToCheap) // We're not at the head of the list prevToCheap->next = cheapest->next; else // Assign the head to be the next node list->head = cheapest->next; // Invalidate the next pointer, as we're no longer in a list cheapest->next = NULL; list->size--; return cheapest; } /* Returns true if the specified coordinates removed a node from this list, or if it was not in the list. False otherwise */ __device__ bool scanList(nodeList_t *list, int x, int y, int cost) { // Initialize the current list to the head node_t *curNode = list->head; // Keep track of the previous list node_t *prevNode = NULL; // Assume it's not in the list bool removed = true; // While we haven't reached the end of the list while (curNode) { // If the node is in the same position if (curNode->x == x && curNode->y == y) { // Assume that we aren't removing it yet removed = false; // If the cost of the new node is less than the one in the list, remove the one on the list if (cost < curNode->f) { // We know we're removing it removed = true; // If we're at the head of the list, reassign the new head if (prevNode == NULL) list->head = curNode->next; else prevNode->next = curNode->next; debugPrintf("curNode->next %p\n", curNode->next); // Free the node free(curNode); list->size--; // There will only be one break; } } prevNode = curNode; curNode = curNode->next; } return removed; } __device__ void freeList(nodeList_t *list) { node_t *curNode = list->head; node_t *temp = NULL; // While we have a valid list while (curNode) { // Mark the next node temp = curNode->next; // Free our node free(curNode); // Go to the next node curNode = temp; } } __device__ void buildPath(nodeList_t *list, node_t *end, node_t *start) { // Build the list in reverse // Make an identical node to the end node_t *curNode = (node_t *) malloc(sizeof(node_t)); curNode->f = end->f; curNode->g = end->g; curNode->h = end->h; curNode->next = NULL; curNode->parent = NULL; curNode->x = end->x; curNode->y = end->y; // stick it on the list list->head = curNode; list->size = 1; // If end and start are the same, we're done if (end->x == start->x && end->y == start->y) return; // Otherwise node_t *prevNode = end->parent; while (prevNode) { debugPrintf("parent: %d,%d\n", prevNode->x, prevNode->y); // Allocate a new node node_t *newNode = (node_t *) malloc(sizeof(node_t)); curNode->parent = newNode; newNode->next = curNode; newNode->parent = NULL; newNode->f = prevNode->f; newNode->g = prevNode->g; newNode->h = prevNode->h; newNode->x = prevNode->x; newNode->y = prevNode->y; curNode = newNode; // The size is incremented list->size++; // Set the new head list->head = curNode; // Get the next 
node prevNode = prevNode->parent; } debugPrintf("List head coords: %d,%d\n", list->head->x, list->head->y); } __device__ nodeList_t *aStar(point_t *grid, human_t *human, int maxWidth, int maxHeight) { node_t *start = (node_t *) malloc(sizeof(node_t)); start->parent = NULL; start->next = NULL; start->x = human->posX; start->y = human->posY; start->g = 0; start->h = abs(start->x - human->goalX) + abs(start->y - human->goalY); start->f = start->h; nodeList_t *openList = (nodeList_t *) malloc(sizeof(nodeList_t)); openList->head = start; openList->size = 1; nodeList_t *closedList = (nodeList_t *) malloc(sizeof(nodeList_t)); closedList->head = NULL; closedList->size = 0; nodeList_t *pathList = NULL; debugPrintf("initialized\n"); while (openList->size > 0) { node_t *q = removeCheapest(openList); debugPrintf("%d,%d | %d\n", q->x, q->y, q->f); // Let's see if this was the goal if (q->x == human->goalX && q->y == human->goalY) { debugPrintf("goal reached: %d,%d\n", q->x, q->y); pathList = (nodeList_t *) malloc(sizeof(nodeList_t)); buildPath(pathList, q, start); break; } int qx = q->x, qy = q->y; // For each successor for (int yOff = -1; yOff <= 1; yOff++) { for (int xOff = -1; xOff <= 1; xOff++) { // The center isn't a successor if (xOff == 0 && yOff == 0) continue; // Make sure it is within bounds if (xOff + qx >= maxWidth || yOff + qy >= maxHeight || xOff + qx < 0 || yOff + qy < 0) continue; // Get the point int px = qx + xOff; int py = qy + yOff; debugPrintf("point coords: %d,%d\n", px, py); point_t point = grid[px + py * maxWidth]; // If it's not a path or goal, skip it // TODO: go through humans if (point.type != TPATH && point.type != TEND) continue; int gCost = abs(xOff) + abs(yOff) + q->g; // We can spend a little more time computing the best path int hCost = abs(px - human->goalX) + abs(py - human->goalY); int fCost = gCost + hCost; debugPrintf("g: %d, h: %d, f: %d\n", gCost, hCost, fCost); debugPrintf("scanning list\n"); bool oRem = scanList(openList, px, py, fCost); bool cRem = scanList(closedList, px, py, fCost); debugPrintf("removed: %d\n", oRem && cRem); // If it was removed from both lists, then add it to open if (oRem && cRem) { node_t *newNode = (node_t *) malloc(sizeof(node_t)); newNode->parent = q; newNode->next = NULL; newNode->x = px; newNode->y = py; newNode->g = gCost; newNode->h = hCost; newNode->f = fCost; addToList(openList, newNode); debugPrintf("added\n"); } debugPrintf("%d,%d | %d\n", px, py, point.type); } } } debugPrintf("Open list is empty or goal?\n"); freeList(openList); freeList(closedList); return pathList; } __device__ void printPath(nodeList_t *list) { node_t *cur = list->head; while (cur) { if (cur->next) printf("(%d,%d)->", cur->x, cur->y); else printf("(%d,%d)G", cur->x, cur->y); cur = cur->next; } printf("\n"); } __device__ bool moveHuman(point_t *grid, human_t *human, nodeList_t *path, int width, int *remainingHumans) { // Get our next point node_t *node = path->head->next; // Get the point on the grid it should be point_t *point = &grid[node->x + node->y * width]; point_t *curPoint = &grid[human->posX + human->posY * width]; // Try to reserve this point bool swapped = false; int oldType = atomicCAS(&point->type, TPATH, THUM); swapped = (oldType == TPATH); // If the point is a goal, that's fine too if (!swapped && oldType == TEND) swapped = ((oldType = atomicCAS(&point->type, TEND, THUM)) == TEND); // If we swapped, then update mark our last position as empty if (swapped) { atomicCAS(&curPoint->type, THUM, oldType); // Update our position human->posX = 
node->x; human->posY = node->y; // If we have arrived, mark this human as solved if (human->posX == human->goalX && human->posY == human->goalY) atomicSub(remainingHumans, 1); } return swapped; } __device__ void addToStatsPath(nodeList_t *path, node_t *node) { // Allocate a new node for this node_t *newNode = (node_t *) malloc(sizeof(node_t)); // Set its position and cost parameters if (node != NULL) { newNode->f = node->f; newNode->g = node->g; newNode->h = node->h; newNode->x = node->x; newNode->y = node->y; } else { // Duplicate the last node in this list node_t *cur = path->head; while (cur->next) cur = cur->next; newNode->f = cur->f; newNode->g = cur->g; newNode->h = cur->h; newNode->x = cur->x; newNode->y = cur->y; } // Common to both paths newNode->next = NULL; newNode->parent = NULL; // Add it to the list if (path->head == NULL) { // The list is empty path->head = newNode; } else { // The list is not empty node_t *cur = path->head; // Skip to the last item while (cur->next) cur = cur->next; // Set the node cur->next = newNode; newNode->parent = cur; } path->size++; } __device__ void transferPath(stat_t *stats, int id, void *results, int width, int height) { debugPrintf("%p, %lu\n", results, sizeof(char)); // Get our offset into the results array, which we'll treat as rows of results int resultWidth = width * height * sizeof(simple_point_t) + 2 * sizeof(int); debugPrintf("width: %d\n", resultWidth); void *rRow = (void *) (((char *) results) + resultWidth * id); debugPrintf("base: %p, row: %p, diff: %d, id: %d\n", results, rRow, ((char * ) rRow) - ((char * ) results), id); // Write the stats to the results node_t *curNode = stats->path->head; // Mark the result we are writing unsigned int pos = 0; while (curNode) { // Get the current result simple_point_t *point = ((simple_point_t *) rRow) + pos++; // Set is position point->x = curNode->x; point->y = curNode->y; curNode = curNode->next; } // Go to the end of the row and write how many elements we read and the collision count unsigned int *nums = (unsigned int *) (((char *) rRow) + (resultWidth - sizeof(int) * 2)); debugPrintf("ints: %p, diff: %d\n", nums, ((char * ) nums) - ((char * ) rRow)); nums[0] = stats->collisions; nums[1] = pos; // Finally, free the paths and stats object free(stats->path); free(stats); } __global__ void solveScene(point_t *grid, human_t *humans, stat_t *stats, nodeList_t **paths, int maxWidth, int maxHeight, int numHumans, int *remainingHumans, void *results) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= numHumans) return; // No human to work on // Initialize the stats if (id < numHumans && stats[id].path == NULL) { stats[id].path = (nodeList_t *) malloc(sizeof(nodeList_t)); stats[id].path->head = NULL; stats[id].path->size = 0; // Make a start node for the stats path node_t node; node.parent = NULL; node.next = NULL; node.x = humans[id].posX; node.y = humans[id].posY; node.g = 0; node.h = (humans[id].posX - humans[id].goalX) * (humans[id].posX - humans[id].goalX) + (humans[id].posY - humans[id].goalY) * (humans[id].posY - humans[id].goalY); node.f = node.h; addToStatsPath(stats[id].path, &node); } // Get our current human human_t *hum = &humans[id]; // If our human has reached the end, return if (hum->posX == hum->goalX && hum->posY == hum->goalY) { // Free the paths list if (paths[id] != NULL) { transferPath(&stats[id], id, results, maxWidth, maxHeight); freeList(paths[id]); paths[id] = NULL; } return; } else { nodeList_t *path; // Compute a new path or just use the already existing one if 
(paths[id] != NULL) { path = paths[id]; } else { path = aStar(grid, hum, maxWidth, maxHeight); paths[id] = path; } // We found a path, so move a human if (path != NULL) { bool moved = moveHuman(grid, hum, path, maxWidth, remainingHumans); // If we moved, add this path to the final path if (moved) { node_t *nextNode = path->head->next; addToStatsPath(stats[id].path, nextNode); // head->next is now our head as that's where we moved node_t *toFree = path->head; path->head = path->head->next; free(toFree); debugPrintf("new position: %d,%d\n", hum->posX, hum->posY); } else { // Stall stats[id].collisions++; node_t *nextNode = path->head; addToStatsPath(stats[id].path, nextNode); debugPrintf("id %d blocked\n", id); } } else { // Stall addToStatsPath(stats[id].path, NULL); } } }
eeb880cc5f2c7be756bb2eaf3d7926c25602bbf0.cu
/* * solver.cu * * Created on: Apr 27, 2014 * Author: mark */ #include "cuda.h" #include <climits> #include "solver.h" #include "pathfinder_common.h" #include "scene.h" __device__ void addToList(nodeList_t *list, node_t *node) { node_t *cur = list->head; list->size++; if (cur) { while (cur->next) cur = cur->next; cur->next = node; } else list->head = node; } __device__ node_t *removeCheapest(nodeList_t *list) { // If the list is empty, return if (list->size == 0) return NULL; // Track the cheapest node node_t *cheapest = list->head; node_t *prevToCheap = NULL; // Iteration variables node_t *curNode = cheapest->next; node_t *prevNode = cheapest; while (curNode) { if (curNode->f < cheapest->f) { cheapest = curNode; prevToCheap = prevNode; } prevNode = curNode; curNode = curNode->next; } // Remove the cheapest node if (prevToCheap) // We're not at the head of the list prevToCheap->next = cheapest->next; else // Assign the head to be the next node list->head = cheapest->next; // Invalidate the next pointer, as we're no longer in a list cheapest->next = NULL; list->size--; return cheapest; } /* Returns true if the specified coordinates removed a node from this list, or if it was not in the list. False otherwise */ __device__ bool scanList(nodeList_t *list, int x, int y, int cost) { // Initialize the current list to the head node_t *curNode = list->head; // Keep track of the previous list node_t *prevNode = NULL; // Assume it's not in the list bool removed = true; // While we haven't reached the end of the list while (curNode) { // If the node is in the same position if (curNode->x == x && curNode->y == y) { // Assume that we aren't removing it yet removed = false; // If the cost of the new node is less than the one in the list, remove the one on the list if (cost < curNode->f) { // We know we're removing it removed = true; // If we're at the head of the list, reassign the new head if (prevNode == NULL) list->head = curNode->next; else prevNode->next = curNode->next; debugPrintf("curNode->next %p\n", curNode->next); // Free the node free(curNode); list->size--; // There will only be one break; } } prevNode = curNode; curNode = curNode->next; } return removed; } __device__ void freeList(nodeList_t *list) { node_t *curNode = list->head; node_t *temp = NULL; // While we have a valid list while (curNode) { // Mark the next node temp = curNode->next; // Free our node free(curNode); // Go to the next node curNode = temp; } } __device__ void buildPath(nodeList_t *list, node_t *end, node_t *start) { // Build the list in reverse // Make an identical node to the end node_t *curNode = (node_t *) malloc(sizeof(node_t)); curNode->f = end->f; curNode->g = end->g; curNode->h = end->h; curNode->next = NULL; curNode->parent = NULL; curNode->x = end->x; curNode->y = end->y; // stick it on the list list->head = curNode; list->size = 1; // If end and start are the same, we're done if (end->x == start->x && end->y == start->y) return; // Otherwise node_t *prevNode = end->parent; while (prevNode) { debugPrintf("parent: %d,%d\n", prevNode->x, prevNode->y); // Allocate a new node node_t *newNode = (node_t *) malloc(sizeof(node_t)); curNode->parent = newNode; newNode->next = curNode; newNode->parent = NULL; newNode->f = prevNode->f; newNode->g = prevNode->g; newNode->h = prevNode->h; newNode->x = prevNode->x; newNode->y = prevNode->y; curNode = newNode; // The size is incremented list->size++; // Set the new head list->head = curNode; // Get the next node prevNode = prevNode->parent; } debugPrintf("List head coords: 
%d,%d\n", list->head->x, list->head->y); } __device__ nodeList_t *aStar(point_t *grid, human_t *human, int maxWidth, int maxHeight) { node_t *start = (node_t *) malloc(sizeof(node_t)); start->parent = NULL; start->next = NULL; start->x = human->posX; start->y = human->posY; start->g = 0; start->h = abs(start->x - human->goalX) + abs(start->y - human->goalY); start->f = start->h; nodeList_t *openList = (nodeList_t *) malloc(sizeof(nodeList_t)); openList->head = start; openList->size = 1; nodeList_t *closedList = (nodeList_t *) malloc(sizeof(nodeList_t)); closedList->head = NULL; closedList->size = 0; nodeList_t *pathList = NULL; debugPrintf("initialized\n"); while (openList->size > 0) { node_t *q = removeCheapest(openList); debugPrintf("%d,%d | %d\n", q->x, q->y, q->f); // Let's see if this was the goal if (q->x == human->goalX && q->y == human->goalY) { debugPrintf("goal reached: %d,%d\n", q->x, q->y); pathList = (nodeList_t *) malloc(sizeof(nodeList_t)); buildPath(pathList, q, start); break; } int qx = q->x, qy = q->y; // For each successor for (int yOff = -1; yOff <= 1; yOff++) { for (int xOff = -1; xOff <= 1; xOff++) { // The center isn't a successor if (xOff == 0 && yOff == 0) continue; // Make sure it is within bounds if (xOff + qx >= maxWidth || yOff + qy >= maxHeight || xOff + qx < 0 || yOff + qy < 0) continue; // Get the point int px = qx + xOff; int py = qy + yOff; debugPrintf("point coords: %d,%d\n", px, py); point_t point = grid[px + py * maxWidth]; // If it's not a path or goal, skip it // TODO: go through humans if (point.type != TPATH && point.type != TEND) continue; int gCost = abs(xOff) + abs(yOff) + q->g; // We can spend a little more time computing the best path int hCost = abs(px - human->goalX) + abs(py - human->goalY); int fCost = gCost + hCost; debugPrintf("g: %d, h: %d, f: %d\n", gCost, hCost, fCost); debugPrintf("scanning list\n"); bool oRem = scanList(openList, px, py, fCost); bool cRem = scanList(closedList, px, py, fCost); debugPrintf("removed: %d\n", oRem && cRem); // If it was removed from both lists, then add it to open if (oRem && cRem) { node_t *newNode = (node_t *) malloc(sizeof(node_t)); newNode->parent = q; newNode->next = NULL; newNode->x = px; newNode->y = py; newNode->g = gCost; newNode->h = hCost; newNode->f = fCost; addToList(openList, newNode); debugPrintf("added\n"); } debugPrintf("%d,%d | %d\n", px, py, point.type); } } } debugPrintf("Open list is empty or goal?\n"); freeList(openList); freeList(closedList); return pathList; } __device__ void printPath(nodeList_t *list) { node_t *cur = list->head; while (cur) { if (cur->next) printf("(%d,%d)->", cur->x, cur->y); else printf("(%d,%d)G", cur->x, cur->y); cur = cur->next; } printf("\n"); } __device__ bool moveHuman(point_t *grid, human_t *human, nodeList_t *path, int width, int *remainingHumans) { // Get our next point node_t *node = path->head->next; // Get the point on the grid it should be point_t *point = &grid[node->x + node->y * width]; point_t *curPoint = &grid[human->posX + human->posY * width]; // Try to reserve this point bool swapped = false; int oldType = atomicCAS(&point->type, TPATH, THUM); swapped = (oldType == TPATH); // If the point is a goal, that's fine too if (!swapped && oldType == TEND) swapped = ((oldType = atomicCAS(&point->type, TEND, THUM)) == TEND); // If we swapped, then update mark our last position as empty if (swapped) { atomicCAS(&curPoint->type, THUM, oldType); // Update our position human->posX = node->x; human->posY = node->y; // If we have arrived, mark this human 
as solved if (human->posX == human->goalX && human->posY == human->goalY) atomicSub(remainingHumans, 1); } return swapped; } __device__ void addToStatsPath(nodeList_t *path, node_t *node) { // Allocate a new node for this node_t *newNode = (node_t *) malloc(sizeof(node_t)); // Set its position and cost parameters if (node != NULL) { newNode->f = node->f; newNode->g = node->g; newNode->h = node->h; newNode->x = node->x; newNode->y = node->y; } else { // Duplicate the last node in this list node_t *cur = path->head; while (cur->next) cur = cur->next; newNode->f = cur->f; newNode->g = cur->g; newNode->h = cur->h; newNode->x = cur->x; newNode->y = cur->y; } // Common to both paths newNode->next = NULL; newNode->parent = NULL; // Add it to the list if (path->head == NULL) { // The list is empty path->head = newNode; } else { // The list is not empty node_t *cur = path->head; // Skip to the last item while (cur->next) cur = cur->next; // Set the node cur->next = newNode; newNode->parent = cur; } path->size++; } __device__ void transferPath(stat_t *stats, int id, void *results, int width, int height) { debugPrintf("%p, %lu\n", results, sizeof(char)); // Get our offset into the results array, which we'll treat as rows of results int resultWidth = width * height * sizeof(simple_point_t) + 2 * sizeof(int); debugPrintf("width: %d\n", resultWidth); void *rRow = (void *) (((char *) results) + resultWidth * id); debugPrintf("base: %p, row: %p, diff: %d, id: %d\n", results, rRow, ((char * ) rRow) - ((char * ) results), id); // Write the stats to the results node_t *curNode = stats->path->head; // Mark the result we are writing unsigned int pos = 0; while (curNode) { // Get the current result simple_point_t *point = ((simple_point_t *) rRow) + pos++; // Set is position point->x = curNode->x; point->y = curNode->y; curNode = curNode->next; } // Go to the end of the row and write how many elements we read and the collision count unsigned int *nums = (unsigned int *) (((char *) rRow) + (resultWidth - sizeof(int) * 2)); debugPrintf("ints: %p, diff: %d\n", nums, ((char * ) nums) - ((char * ) rRow)); nums[0] = stats->collisions; nums[1] = pos; // Finally, free the paths and stats object free(stats->path); free(stats); } __global__ void solveScene(point_t *grid, human_t *humans, stat_t *stats, nodeList_t **paths, int maxWidth, int maxHeight, int numHumans, int *remainingHumans, void *results) { int id = threadIdx.x + blockDim.x * blockIdx.x; if (id >= numHumans) return; // No human to work on // Initialize the stats if (id < numHumans && stats[id].path == NULL) { stats[id].path = (nodeList_t *) malloc(sizeof(nodeList_t)); stats[id].path->head = NULL; stats[id].path->size = 0; // Make a start node for the stats path node_t node; node.parent = NULL; node.next = NULL; node.x = humans[id].posX; node.y = humans[id].posY; node.g = 0; node.h = (humans[id].posX - humans[id].goalX) * (humans[id].posX - humans[id].goalX) + (humans[id].posY - humans[id].goalY) * (humans[id].posY - humans[id].goalY); node.f = node.h; addToStatsPath(stats[id].path, &node); } // Get our current human human_t *hum = &humans[id]; // If our human has reached the end, return if (hum->posX == hum->goalX && hum->posY == hum->goalY) { // Free the paths list if (paths[id] != NULL) { transferPath(&stats[id], id, results, maxWidth, maxHeight); freeList(paths[id]); paths[id] = NULL; } return; } else { nodeList_t *path; // Compute a new path or just use the already existing one if (paths[id] != NULL) { path = paths[id]; } else { path = aStar(grid, hum, 
maxWidth, maxHeight); paths[id] = path; } // We found a path, so move a human if (path != NULL) { bool moved = moveHuman(grid, hum, path, maxWidth, remainingHumans); // If we moved, add this path to the final path if (moved) { node_t *nextNode = path->head->next; addToStatsPath(stats[id].path, nextNode); // head->next is now our head as that's where we moved node_t *toFree = path->head; path->head = path->head->next; free(toFree); debugPrintf("new position: %d,%d\n", hum->posX, hum->posY); } else { // Stall stats[id].collisions++; node_t *nextNode = path->head; addToStatsPath(stats[id].path, nextNode); debugPrintf("id %d blocked\n", id); } } else { // Stall addToStatsPath(stats[id].path, NULL); } } }
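The device-side A* in the pair above expands the 8-connected neighbourhood, prices a step at |dx| + |dy| (so a diagonal move costs 2), and uses the Manhattan distance to the goal as the heuristic, which cannot overestimate the remaining cost under that step pricing, keeping the search admissible. A host-side restatement of the cost bookkeeping that feeds removeCheapest (illustration only; the struct and function names are mine):

#include <cstdlib>

struct Costs { int g; int h; int f; };

// px,py: successor cell; parentG: g-cost of the node being expanded;
// dx,dy: offset from that node to the successor; goalX,goalY: the target cell.
static Costs stepCosts(int px, int py, int parentG, int dx, int dy, int goalX, int goalY)
{
    Costs c;
    c.g = parentG + std::abs(dx) + std::abs(dy);       // accumulated path cost
    c.h = std::abs(px - goalX) + std::abs(py - goalY); // Manhattan-distance heuristic
    c.f = c.g + c.h;                                   // priority scanned by removeCheapest
    return c;
}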
bab3940e0e736dd9c669554e11983c7aaf055e06.hip
// !!! This is a file automatically generated by hipify!!! #include "../catch.hpp" #define TEST_CUDA_CHECK_RETURN //-------------------------------------------------------------- #include <thrust/device_vector.h> #include "../BaseCudaTestHandler.h" #include "../../GPUPatternMining.Contract/IPairColocationsProvider.h" #include "../../GPUPatternMining/HashMap/gpuhashmapper.h" #include "../../GPUPatternMining/Common/MiningCommon.h" //-------------------------------------------------------------- /* Initializes data according to grap in PB.pdf on page 117 Memory allocated on device will be released in onTeardown method by hipDeviceReset so we don't have to worry about that in tests */ void initData(UIntTableGpuHashMapPtr& pairColocationInstancesListMap, UIntGpuHashMapPtr& pairColocationInstancesCountMap) { constexpr size_t ntc = 6; // nodesTypesCount // hashSize = ((ntc^ntc) - ntc) / 2 + ntc = |upper right part of matrix with diagonal| constexpr size_t hashSize = (ntc * ntc - ntc) / 2 + ntc; auto intKeyProcessor = std::make_shared<GPUUIntKeyProcessor>(); pairColocationInstancesListMap = std::make_shared<UIntTableGpuHashMap>(hashSize, intKeyProcessor.get()); pairColocationInstancesCountMap = std::make_shared<UIntGpuHashMap>(hashSize, intKeyProcessor.get()); // keys size_t keyTableSize = 15 * uintSize; unsigned int h_keys[15] = { 0xAA, 0xAB, 0xAC, 0xAD, 0xAF, 0xBB, 0xBC, 0xBD, 0xBF, 0xCC, 0xCD, 0xCF, 0xDD, 0xDF, 0xFF }; unsigned int* c_keys; hipMalloc(reinterpret_cast<void**>(&c_keys), keyTableSize); hipMemcpy(c_keys, h_keys, keyTableSize, hipMemcpyHostToDevice); // instances count size_t pairColocationsCountSize = 15 * uintSize; //values in this table are {instances count} * 2 beacuse one instance is represented as two uInts unsigned int h_pairColocationsInstancesCount[15] = { 0, 6, 12, 4, 6, 0, 6, 2, 2, 0, 0, 0, 0, 4, 0 }; unsigned int *c_pairColocationsInstancesCount; hipMalloc(reinterpret_cast<void**>(&c_pairColocationsInstancesCount), pairColocationsCountSize); hipMemcpy(c_pairColocationsInstancesCount, h_pairColocationsInstancesCount, pairColocationsCountSize, hipMemcpyHostToDevice); pairColocationInstancesCountMap->insertKeyValuePairs(c_keys, c_pairColocationsInstancesCount, 15); // instances lists constexpr size_t instancesCount = 42; size_t pairColocationsSize = instancesCount * uintSize; unsigned int h_instancesLists[instancesCount] = { //AA 1,2, 2,4, 3,4, //AB 1,1, 1,2, 2,3, 3,1, 3,2, 3,3, //AC 1,1, 2,2, //AD 1,4, 1,2, 2,2, //AF //BB 2,2, 4,2, 4,3, //BC 2,1, //BD 2,2, //BF //CC //CD //CF //DD 1,2, 2,2 //DF //FF }; unsigned int* c_instancesList; hipMalloc(reinterpret_cast<void**>(&c_instancesList), pairColocationsSize); hipMemcpy(c_instancesList, h_instancesLists, pairColocationsSize, hipMemcpyHostToDevice); size_t keyInstanceListTableSize = 15 * uintPtrSize; UInt* h_keyInstanceListTable[15] = { NULL, //AA c_instancesList, //AB c_instancesList + 6, //AC c_instancesList + 18, //AD c_instancesList + 22, //AF NULL, //BB c_instancesList + 28, //BC c_instancesList + 34, //BD c_instancesList + 36, //BF NULL, //CC NULL, //CD NULL, //CF NULL, //DD c_instancesList + 38, //DF NULL, //FF }; UInt** c_keyInstanceListTable; CUDA_CHECK_RETURN(hipMalloc(reinterpret_cast<void**>(&c_keyInstanceListTable), keyInstanceListTableSize)); CUDA_CHECK_RETURN(hipMemcpy(c_keyInstanceListTable, h_keyInstanceListTable, keyInstanceListTableSize, hipMemcpyHostToDevice)); pairColocationInstancesListMap->insertKeyValuePairs(c_keys, c_keyInstanceListTable, 15); } TEST_CASE_METHOD(BaseCudaTestHandler, "Simple initial 
data test 00", "Prevalence index test data test") { UIntTableGpuHashMapPtr colocationInstancesListMap; UIntGpuHashMapPtr colocationInstancesCountMap; initData(colocationInstancesListMap, colocationInstancesCountMap); unsigned int h_key[] = { 0xCC }; unsigned int* c_key; hipMalloc(reinterpret_cast<void**>(&c_key), uintSize); hipMemcpy(c_key, h_key, uintSize, hipMemcpyHostToDevice); unsigned int h_instanceCount; unsigned int* c_instancesCount; hipMalloc(reinterpret_cast<void**>(&c_instancesCount), uintSize); colocationInstancesCountMap->getValues(c_key, c_instancesCount, 1); hipMemcpy(&h_instanceCount, c_instancesCount, uintSize, hipMemcpyDeviceToHost); REQUIRE(h_instanceCount == 0); unsigned int** c_instancesListPtr; hipMalloc(reinterpret_cast<void**>(&c_instancesListPtr), uintPtrSize); colocationInstancesListMap->getValues(c_key, c_instancesListPtr, 1); unsigned int* h_instanceListPtr; hipMemcpy(&h_instanceListPtr, c_instancesListPtr, uintPtrSize, hipMemcpyDeviceToHost); REQUIRE(h_instanceListPtr == NULL); } TEST_CASE_METHOD(BaseCudaTestHandler, "Simple initial data test 01", "Prevalence index test data test") { UIntTableGpuHashMapPtr colocationInstancesListMap; UIntGpuHashMapPtr colocationInstancesCountMap; initData(colocationInstancesListMap, colocationInstancesCountMap); unsigned int h_key[] = { 0xAC }; unsigned int* c_key; hipMalloc(reinterpret_cast<void**>(&c_key), uintSize); hipMemcpy(c_key, h_key, uintSize, hipMemcpyHostToDevice); unsigned int h_instanceCount; unsigned int* c_instancesCount; hipMalloc(reinterpret_cast<void**>(&c_instancesCount), uintSize); colocationInstancesCountMap->getValues(c_key, c_instancesCount, 1); hipMemcpy(&h_instanceCount, c_instancesCount, uintSize, hipMemcpyDeviceToHost); REQUIRE(h_instanceCount == 6 * 2); std::shared_ptr<unsigned int> h_instances(new unsigned int[h_instanceCount], std::default_delete<unsigned int[]>()); unsigned int** c_instancesListPtr; hipMalloc(reinterpret_cast<void**>(&c_instancesListPtr), uintPtrSize); colocationInstancesListMap->getValues(c_key, c_instancesListPtr, 1); unsigned int* h_instanceListPtr; hipMemcpy(&h_instanceListPtr, c_instancesListPtr, uintPtrSize, hipMemcpyDeviceToHost); hipMemcpy(h_instances.get(), h_instanceListPtr, h_instanceCount * uintSize, hipMemcpyDeviceToHost); unsigned int expected[] = { 1,1, 1,2, 2,3, 3,1, 3,2, 3,3 }; REQUIRE(std::equal(std::begin(expected), std::end(expected), h_instances.get()) == true); } TEST_CASE_METHOD(BaseCudaTestHandler, "Prevalence index test 01", "HashMap") { UIntTableGpuHashMapPtr colocationInstancesListMap; UIntGpuHashMapPtr colocationInstancesCountMap; initData(colocationInstancesListMap, colocationInstancesCountMap); REQUIRE(true); }
bab3940e0e736dd9c669554e11983c7aaf055e06.cu
#include "../catch.hpp" #define TEST_CUDA_CHECK_RETURN //-------------------------------------------------------------- #include <thrust/device_vector.h> #include "../BaseCudaTestHandler.h" #include "../../GPUPatternMining.Contract/IPairColocationsProvider.h" #include "../../GPUPatternMining/HashMap/gpuhashmapper.h" #include "../../GPUPatternMining/Common/MiningCommon.h" //-------------------------------------------------------------- /* Initializes data according to grap in PB.pdf on page 117 Memory allocated on device will be released in onTeardown method by cudaDeviceReset so we don't have to worry about that in tests */ void initData(UIntTableGpuHashMapPtr& pairColocationInstancesListMap, UIntGpuHashMapPtr& pairColocationInstancesCountMap) { constexpr size_t ntc = 6; // nodesTypesCount // hashSize = ((ntc^ntc) - ntc) / 2 + ntc = |upper right part of matrix with diagonal| constexpr size_t hashSize = (ntc * ntc - ntc) / 2 + ntc; auto intKeyProcessor = std::make_shared<GPUUIntKeyProcessor>(); pairColocationInstancesListMap = std::make_shared<UIntTableGpuHashMap>(hashSize, intKeyProcessor.get()); pairColocationInstancesCountMap = std::make_shared<UIntGpuHashMap>(hashSize, intKeyProcessor.get()); // keys size_t keyTableSize = 15 * uintSize; unsigned int h_keys[15] = { 0xAA, 0xAB, 0xAC, 0xAD, 0xAF, 0xBB, 0xBC, 0xBD, 0xBF, 0xCC, 0xCD, 0xCF, 0xDD, 0xDF, 0xFF }; unsigned int* c_keys; cudaMalloc(reinterpret_cast<void**>(&c_keys), keyTableSize); cudaMemcpy(c_keys, h_keys, keyTableSize, cudaMemcpyHostToDevice); // instances count size_t pairColocationsCountSize = 15 * uintSize; //values in this table are {instances count} * 2 beacuse one instance is represented as two uInts unsigned int h_pairColocationsInstancesCount[15] = { 0, 6, 12, 4, 6, 0, 6, 2, 2, 0, 0, 0, 0, 4, 0 }; unsigned int *c_pairColocationsInstancesCount; cudaMalloc(reinterpret_cast<void**>(&c_pairColocationsInstancesCount), pairColocationsCountSize); cudaMemcpy(c_pairColocationsInstancesCount, h_pairColocationsInstancesCount, pairColocationsCountSize, cudaMemcpyHostToDevice); pairColocationInstancesCountMap->insertKeyValuePairs(c_keys, c_pairColocationsInstancesCount, 15); // instances lists constexpr size_t instancesCount = 42; size_t pairColocationsSize = instancesCount * uintSize; unsigned int h_instancesLists[instancesCount] = { //AA 1,2, 2,4, 3,4, //AB 1,1, 1,2, 2,3, 3,1, 3,2, 3,3, //AC 1,1, 2,2, //AD 1,4, 1,2, 2,2, //AF //BB 2,2, 4,2, 4,3, //BC 2,1, //BD 2,2, //BF //CC //CD //CF //DD 1,2, 2,2 //DF //FF }; unsigned int* c_instancesList; cudaMalloc(reinterpret_cast<void**>(&c_instancesList), pairColocationsSize); cudaMemcpy(c_instancesList, h_instancesLists, pairColocationsSize, cudaMemcpyHostToDevice); size_t keyInstanceListTableSize = 15 * uintPtrSize; UInt* h_keyInstanceListTable[15] = { NULL, //AA c_instancesList, //AB c_instancesList + 6, //AC c_instancesList + 18, //AD c_instancesList + 22, //AF NULL, //BB c_instancesList + 28, //BC c_instancesList + 34, //BD c_instancesList + 36, //BF NULL, //CC NULL, //CD NULL, //CF NULL, //DD c_instancesList + 38, //DF NULL, //FF }; UInt** c_keyInstanceListTable; CUDA_CHECK_RETURN(cudaMalloc(reinterpret_cast<void**>(&c_keyInstanceListTable), keyInstanceListTableSize)); CUDA_CHECK_RETURN(cudaMemcpy(c_keyInstanceListTable, h_keyInstanceListTable, keyInstanceListTableSize, cudaMemcpyHostToDevice)); pairColocationInstancesListMap->insertKeyValuePairs(c_keys, c_keyInstanceListTable, 15); } TEST_CASE_METHOD(BaseCudaTestHandler, "Simple initial data test 00", "Prevalence index test data test") 
{ UIntTableGpuHashMapPtr colocationInstancesListMap; UIntGpuHashMapPtr colocationInstancesCountMap; initData(colocationInstancesListMap, colocationInstancesCountMap); unsigned int h_key[] = { 0xCC }; unsigned int* c_key; cudaMalloc(reinterpret_cast<void**>(&c_key), uintSize); cudaMemcpy(c_key, h_key, uintSize, cudaMemcpyHostToDevice); unsigned int h_instanceCount; unsigned int* c_instancesCount; cudaMalloc(reinterpret_cast<void**>(&c_instancesCount), uintSize); colocationInstancesCountMap->getValues(c_key, c_instancesCount, 1); cudaMemcpy(&h_instanceCount, c_instancesCount, uintSize, cudaMemcpyDeviceToHost); REQUIRE(h_instanceCount == 0); unsigned int** c_instancesListPtr; cudaMalloc(reinterpret_cast<void**>(&c_instancesListPtr), uintPtrSize); colocationInstancesListMap->getValues(c_key, c_instancesListPtr, 1); unsigned int* h_instanceListPtr; cudaMemcpy(&h_instanceListPtr, c_instancesListPtr, uintPtrSize, cudaMemcpyDeviceToHost); REQUIRE(h_instanceListPtr == NULL); } TEST_CASE_METHOD(BaseCudaTestHandler, "Simple initial data test 01", "Prevalence index test data test") { UIntTableGpuHashMapPtr colocationInstancesListMap; UIntGpuHashMapPtr colocationInstancesCountMap; initData(colocationInstancesListMap, colocationInstancesCountMap); unsigned int h_key[] = { 0xAC }; unsigned int* c_key; cudaMalloc(reinterpret_cast<void**>(&c_key), uintSize); cudaMemcpy(c_key, h_key, uintSize, cudaMemcpyHostToDevice); unsigned int h_instanceCount; unsigned int* c_instancesCount; cudaMalloc(reinterpret_cast<void**>(&c_instancesCount), uintSize); colocationInstancesCountMap->getValues(c_key, c_instancesCount, 1); cudaMemcpy(&h_instanceCount, c_instancesCount, uintSize, cudaMemcpyDeviceToHost); REQUIRE(h_instanceCount == 6 * 2); std::shared_ptr<unsigned int> h_instances(new unsigned int[h_instanceCount], std::default_delete<unsigned int[]>()); unsigned int** c_instancesListPtr; cudaMalloc(reinterpret_cast<void**>(&c_instancesListPtr), uintPtrSize); colocationInstancesListMap->getValues(c_key, c_instancesListPtr, 1); unsigned int* h_instanceListPtr; cudaMemcpy(&h_instanceListPtr, c_instancesListPtr, uintPtrSize, cudaMemcpyDeviceToHost); cudaMemcpy(h_instances.get(), h_instanceListPtr, h_instanceCount * uintSize, cudaMemcpyDeviceToHost); unsigned int expected[] = { 1,1, 1,2, 2,3, 3,1, 3,2, 3,3 }; REQUIRE(std::equal(std::begin(expected), std::end(expected), h_instances.get()) == true); } TEST_CASE_METHOD(BaseCudaTestHandler, "Prevalence index test 01", "HashMap") { UIntTableGpuHashMapPtr colocationInstancesListMap; UIntGpuHashMapPtr colocationInstancesCountMap; initData(colocationInstancesListMap, colocationInstancesCountMap); REQUIRE(true); }
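The test fixture in the pair above keys both hash maps by packed type pairs (0xAA, 0xAB, ..., 0xFF) and sizes them as the upper triangle of the type-pair matrix including the diagonal, hence hashSize = (ntc*ntc - ntc)/2 + ntc. A small restatement of that scheme (illustration only; the helper names are mine and the nibble packing is inferred from the key list):

#include <cstddef>

// Pack an unordered type pair into one byte, smaller type in the high nibble,
// e.g. types 0xA and 0xC give the key 0xAC.
static unsigned int packPairKey(unsigned int typeA, unsigned int typeB)
{
    if (typeA > typeB) { unsigned int t = typeA; typeA = typeB; typeB = t; }
    return (typeA << 4) | typeB;
}

// Number of unordered pairs drawn from ntc types, diagonal included.
static std::size_t upperTriangleSize(std::size_t ntc)
{
    return (ntc * ntc - ntc) / 2 + ntc;
}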
42215d227d324e35ab96954fa9780a36373b179b.hip
// !!! This is a file automatically generated by hipify!!! // Andrew Gloster // February 2019 // Program to solve the 2D Cahn-Hilliard equation on a periodic domain using the ADI method // Outputs timing // Copyright 2019 Andrew Gloster // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // --------------------------------------------------------------------- // Standard Libraries and Headers // --------------------------------------------------------------------- #include <math.h> #include <stdlib.h> #include <stdio.h> #include "hip/hip_runtime.h" #include <rocblas.h> #include "hdf5.h" #include <time.h> // --------------------------------------------------------------------- // Programmer Libraries and Headers // --------------------------------------------------------------------- #include "../../../cuSten/cuSten.h" #include "cuPentBatch.h" #include "BatchHyper.h" // --------------------------------------------------------------------- // MACROS // --------------------------------------------------------------------- // Block sizes for finding RHS #define BLOCK_X_FUN 8 #define BLOCK_Y_FUN 8 #define BLOCK_X 32 #define BLOCK_Y 32 // Block size for inverting #define BLOCK_INV 64 //--------------------------------------------------------------------- // Static functions for use in main program //--------------------------------------------------------------------- // Find cBar for differencing __global__ static void findCBar(double* cOld, double* cCurr, double* cBar, int nx) { // Matrix index int globalIdx = blockDim.x * blockIdx.x + threadIdx.x; int globalIdy = blockDim.y * blockIdx.y + threadIdx.y; // Set index being computed int index = globalIdy * nx + globalIdx; // Find cBar cBar[index] = 2.0 * cCurr[index] - cOld[index]; } // Find the full combined RHS __global__ static void findRHS(double* cOld, double* cCurr, double* cHalf, double* cNonLinRHS, int nx) { // Matrix index int globalIdx = blockDim.x * blockIdx.x + threadIdx.x; int globalIdy = blockDim.y * blockIdx.y + threadIdx.y; // Set index being computed int index = globalIdy * nx + globalIdx; // Set the RHS for inversion cHalf[index] += - (2.0 / 3.0) * (cCurr[index] - cOld[index]) + cNonLinRHS[index]; // Set cOld to cCurr cOld[index] = cCurr[index]; } // Recover the updated timestep __global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx) { // Matrix index int globalIdx = blockDim.x * blockIdx.x + threadIdx.x; int globalIdy = blockDim.y * blockIdx.y + threadIdx.y; // Set index being computed int index = globalIdy * nx + globalIdx; // Recover the new data cCurr[index] = cBar[index] + cHalf[index]; } static double double_rand(double min, double max) { double scale = (double) rand() / (double) RAND_MAX; /* [0, 1.0] */ return min + scale * ( max - min ); /* [min, max] */ } //--------------------------------------------------------------------- // Function to calculate the non linear RHS //--------------------------------------------------------------------- /*! 
\var typedef double (*devArg1X)(double*, double*, int); \brief The function pointer containing the user defined function to be applied <br> Input 1: The pointer to input data to the function <br> Input 2: The pointer to the coefficients provided by the user <br> Input 3: The current index position (centre of the stencil to be applied) <br> Input 4: Value to be used to jump between rows. (j + 1, j - 1 etc.) <br> Input 5: Size of stencil in x direction <br> Input 6: Size of stencil in y direction */ typedef double (*devArg1XY)(double*, double*, int, int, int, int); __inline__ __device__ double nonLinRHS(double* data, double* coe, int loc, int jump, int nx, int ny) { double result = 0.0; double current; int temp; int count = 0; #pragma unroll for (int j = 0; j < ny; j++) { temp = loc + j * jump; #pragma unroll for (int i = 0; i < nx; i++) { current = data[temp + i]; result += coe[count] * ((current * current * current) - current); count ++; } } return result; } __device__ devArg1XY devFunc = nonLinRHS; // --------------------------------------------------------------------- // Begin main program // --------------------------------------------------------------------- int main(int argc, char *argv[]) { //---------------------------------------- // Simulation paramters //---------------------------------------- // Set coefficients double D = 1.0; double gamma = 0.01; // Set grid spacing -- Use a square grid -- thus all n = ny // Read from command line int nx; nx = atoi(argv[1]); // Set the size of the reduced matrix int size = nx - 2; // Set timing double T = 10.0; // Domain size double lx = 16.0 * M_PI; // Spacings double dx = lx / nx; double dt = 0.1 * dx; // Buffer used for error checking char msgStringBuffer[1024]; // What device to compute on int computeDevice = 0; //---------------------------------------- // Set up GPU grids //---------------------------------------- // Set for inversion int gridInv = (nx % BLOCK_INV == 0) ? (nx / BLOCK_INV) : (nx / BLOCK_INV + 1); dim3 blockDimInv(BLOCK_INV); dim3 gridDimInv(gridInv); // Set for any standard grid int xGrid = (nx % BLOCK_X == 0) ? (nx / BLOCK_X) : (nx / BLOCK_X + 1); int yGrid = (nx % BLOCK_Y == 0) ? 
(nx / BLOCK_Y) : (nx / BLOCK_Y + 1); dim3 blockDim(BLOCK_X, BLOCK_Y); dim3 gridDim(xGrid, yGrid); //---------------------------------------- // Memory allocation //---------------------------------------- // Old timestep double* cOld; hipMallocManaged(&cOld, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cOld"); checkError(msgStringBuffer); // Current timestep double* cCurr; hipMallocManaged(&cCurr, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cCurr"); checkError(msgStringBuffer); // New timestep double* cNonLinRHS; hipMallocManaged(&cNonLinRHS, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cNonLinRHS"); checkError(msgStringBuffer); // Intermediate step double* cBar; hipMallocManaged(&cBar, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cBar"); checkError(msgStringBuffer); // Intermediate step double* cHalf; hipMallocManaged(&cHalf, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cBar"); checkError(msgStringBuffer); //---------------------------------------- // Initial Condition //---------------------------------------- // Indexing int temp, index; for (int j = 0; j < nx; j++) { temp = j * nx; for (int i = 0; i < nx; i++) { index = temp + i; cOld[index] = double_rand(- 0.1, 0.1); cCurr[index] = cOld[index]; } } //---------------------------------------- // Allocate the memory for the LHS //---------------------------------------- // Lowest diagonal double* ds; hipMallocManaged(&ds, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for ds"); checkError(msgStringBuffer); // Lower diagonal double* dl; hipMallocManaged(&dl, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for dl"); checkError(msgStringBuffer); // Main daigonal double* diag; hipMallocManaged(&diag, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for diag"); checkError(msgStringBuffer); // Upper diagonal double* du; hipMallocManaged(&du, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for du"); checkError(msgStringBuffer); // Highest diagonal double* dw; hipMallocManaged(&dw, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for dw"); checkError(msgStringBuffer); //---------------------------------------- // Set up cuBLAS //---------------------------------------- // Set a handle hipblasHandle_t handleBLAS; // Set a status hipblasStatus_t statusBLAS; // Create the handle statusBLAS = hipblasCreate(&handleBLAS); // Set constants const double alpha = 1.0; const double beta = 0.0; //---------------------------------------- // Set coefficients //---------------------------------------- // Linear coefficient double simgaLin = 2.0 * dt * D * gamma / (3.0 * (pow(dx, 4.0))); // Set the diagonal elements double a = simgaLin; double b = - 4 * simgaLin; double c = 1 + 6 * simgaLin; double d = - 4 * simgaLin; double e = simgaLin; //---------------------------------------- // Set the matrix //---------------------------------------- // Set the LHS for inversion hipLaunchKernelGGL(( setMultiLHS), dim3(gridDim), dim3(blockDim), 0, 0, ds, dl, diag, du, dw, a, b, c, d, e, size, nx); sprintf(msgStringBuffer, "Failed to set LHS matrix for initial timestep"); checkError(msgStringBuffer); // Ensure matrix is set hipDeviceSynchronize(); // Pre-factor the LHS hipLaunchKernelGGL(( pentFactorBatch), dim3(gridDimInv), 
dim3(blockDimInv), 0, 0, ds, dl, diag, du, dw, size, nx); sprintf(msgStringBuffer, "Failed to pre factor LHS matrix for initial timestep"); checkError(msgStringBuffer); // Ensure matrix is factorised hipDeviceSynchronize(); //---------------------------------------- // Find omega and set inverses //---------------------------------------- double* omega = (double*)malloc(4 * sizeof(double)); if (omega == NULL) { printf("%s \n", "Failed to malloc omega"); } double* inv1Single = (double*)malloc(size * sizeof(double)); if (inv1Single == NULL) { printf("%s \n", "Failed to malloc inv1Single"); } double* inv2Single = (double*)malloc(size * sizeof(double)); if (inv2Single == NULL) { printf("%s \n", "Failed to malloc inv2Single"); } double* inv1Multi; hipMallocManaged(&inv1Multi, nx * size * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for inv1Multi"); checkError(msgStringBuffer); double* inv2Multi; hipMallocManaged(&inv2Multi, nx * size * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for inv2Multi"); checkError(msgStringBuffer); findOmega(omega, inv1Single, inv2Single, a, b, c, d, e, nx); for (int j = 0; j < size; j++) { temp = j * nx; for (int i = 0; i < nx; i++) { index = temp + i; inv1Multi[index] = inv1Single[j]; inv2Multi[index] = inv2Single[j]; } } //---------------------------------------- // Set compute for linear RHS //---------------------------------------- int linHoriz = 5; int linLeft = 2; int linRight = 2; int linVert = 5; int linTop = 2; int linBottom = 2; double* weightsLinRHS; hipMallocManaged(&weightsLinRHS, linHoriz * linVert * sizeof(double)); weightsLinRHS[0] = 0.0; weightsLinRHS[1] = 0.0; weightsLinRHS[2] = - 1.0 * simgaLin; weightsLinRHS[3] = 0.0; weightsLinRHS[4] = 0.0; weightsLinRHS[5] = 0.0; weightsLinRHS[6] = - 2.0 * simgaLin; weightsLinRHS[7] = 8.0 * simgaLin; weightsLinRHS[8] = - 2.0 * simgaLin; weightsLinRHS[9] = 0.0; weightsLinRHS[10] = - 1.0 * simgaLin; weightsLinRHS[11] = 8.0 * simgaLin; weightsLinRHS[12] = - 20.0 * simgaLin; weightsLinRHS[13] = 8.0 * simgaLin; weightsLinRHS[14] = - 1.0 * simgaLin; weightsLinRHS[15] = 0.0; weightsLinRHS[16] = - 2.0 * simgaLin; weightsLinRHS[17] = 8.0 * simgaLin; weightsLinRHS[18] = - 2.0 * simgaLin; weightsLinRHS[19] = 0.0; weightsLinRHS[20] = 0.0; weightsLinRHS[21] = 0.0; weightsLinRHS[22] = -1.0 * simgaLin; weightsLinRHS[23] = 0.0; weightsLinRHS[24] = 0.0; // Set up the compute device structs cuSten_t linRHS; // Set the number of tiles int linInitTiles = 1; // Initialise the instance of the stencil cuStenCreate2DXYp(&linRHS, computeDevice, linInitTiles, nx, nx, BLOCK_X, BLOCK_Y, cHalf, cBar, weightsLinRHS, linHoriz, linLeft, linRight, linVert, linTop, linBottom); // Ensure compute type created hipDeviceSynchronize(); //---------------------------------------- // Set up computation of non-linear RHS //---------------------------------------- // Set up the compute device structs cuSten_t nonLinCompute; // Synchronise to ensure everything initialised hipDeviceSynchronize(); // Copy the function to device memory double* func; hipMemcpyFromSymbol(&func, devFunc, sizeof(devArg1XY)); // Set new non linear coefficient double sigmaNonLin = (dt / 3.0) * D * (2.0 / pow(dx, 2.0)); int numStenHoriz = 3; int numStenLeft = 1; int numStenRight = 1; int numStenVert = 3; int numStenTop = 1; int numStenBottom = 1; int nonLinTiles = 1; double* coe; hipMallocManaged(&coe, numStenHoriz * numStenVert * sizeof(double)); coe[0] = 0.0; coe[1] = 1.0 * sigmaNonLin; coe[2] = 0.0; coe[3] = 1.0 * sigmaNonLin; 
coe[4] = - 4.0 * sigmaNonLin; coe[5] = 1.0 * sigmaNonLin; coe[6] = 0.0; coe[7] = 1.0 * sigmaNonLin; coe[8] = 0.0; // Initialise the instance of the stencil cuStenCreate2DXYpFun(&nonLinCompute, computeDevice, nonLinTiles, nx, nx, BLOCK_X_FUN, BLOCK_Y_FUN, cNonLinRHS, cCurr, coe, numStenHoriz, numStenLeft, numStenRight, numStenVert, numStenTop, numStenBottom, func); // Synchronise to ensure everything initialised hipDeviceSynchronize(); //---------------------------------------- // Begin timestepping //---------------------------------------- // Track current time-step double t = 0.0; // Define events for timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Store time float time; // Start time hipEventRecord(start, 0 ); while (t < T) { // Set cBar hipLaunchKernelGGL(( findCBar), dim3(gridDim), dim3(blockDim), 0, 0, cOld, cCurr, cBar, nx); // Ensure compute type created hipDeviceSynchronize(); // Compute the non-linear RHS cuStenCompute2DXYpFun(&nonLinCompute, 0); // Compute the linear RHS cuStenCompute2DXYp(&linRHS, 0); // Ensure compute type created hipDeviceSynchronize(); // Find the full RHS and then set cOld to cCurrent hipLaunchKernelGGL(( findRHS), dim3(gridDim), dim3(blockDim), 0, 0, cOld, cCurr, cHalf, cNonLinRHS, nx); // Ensure compute type created hipDeviceSynchronize(); // Transpose the result statusBLAS = hipblasDgeam(handleBLAS, HIPBLAS_OP_T, HIPBLAS_OP_T, nx, nx, &alpha, cHalf, nx, &beta, NULL, nx, cCurr, nx); if (statusBLAS != HIPBLAS_STATUS_SUCCESS) { printf("Unable to compute transpose \n"); return EXIT_FAILURE; } // Ensure transpose completed hipDeviceSynchronize(); // Invert the matrix cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cCurr, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx); // Transpose the result statusBLAS = hipblasDgeam(handleBLAS, HIPBLAS_OP_T, HIPBLAS_OP_T, nx, nx, &alpha, cCurr, nx, &beta, NULL, nx, cHalf, nx); if (statusBLAS != HIPBLAS_STATUS_SUCCESS) { printf("Unable to compute transpose \n"); return EXIT_FAILURE; } // Ensure transpose completed hipDeviceSynchronize(); // Invert the matrix cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cHalf, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx); // Ensure computation completed hipDeviceSynchronize(); hipLaunchKernelGGL(( findNew), dim3(gridDim), dim3(blockDim), 0, 0, cCurr, cBar, cHalf, nx); // Ensure computation completed hipDeviceSynchronize(); // Add on the next time t += dt; } // Ensure computation completed hipDeviceSynchronize(); // End time hipEventRecord(stop, 0); hipEventSynchronize(stop); // Get elapsed time for kernel execution hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("%f \n", time / 1000); //---------------------------------------- // Free memory at the end //---------------------------------------- free(omega); free(inv1Single); free(inv2Single); cuStenDestroy2DXYp(&linRHS); cuStenDestroy2DXYpFun(&nonLinCompute); hipFree(inv1Multi); hipFree(inv2Multi); hipFree(cOld); hipFree(cNonLinRHS); hipFree(cBar); hipFree(cHalf); hipFree(ds); hipFree(dl); hipFree(diag); hipFree(du); hipFree(dw); } // --------------------------------------------------------------------- // End main program // --------------------------------------------------------------------- // --------------------------------------------------------------------- // End of File // ---------------------------------------------------------------------
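For orientation, the pentadiagonal left-hand side that setMultiLHS fills in the file above corresponds to treating the fourth derivative implicitly in each ADI sweep: the identity plus sigma times the 1-D stencil (1, -4, 6, -4, 1), where sigma = 2*dt*D*gamma/(3*dx^4) already carries the grid spacing, gives rows (a, b, c, d, e) = (sigma, -4*sigma, 1+6*sigma, -4*sigma, sigma). A minimal sketch of that row fill (the helper name is mine):

// Fill one row of the implicit ADI operator I + sigma * (1, -4, 6, -4, 1).
static void fillPentadiagonalRow(double sigma, double* a, double* b, double* c, double* d, double* e)
{
    *a = sigma;
    *b = -4.0 * sigma;
    *c = 1.0 + 6.0 * sigma;
    *d = -4.0 * sigma;
    *e = sigma;
}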
42215d227d324e35ab96954fa9780a36373b179b.cu
// Andrew Gloster // February 2019 // Program to solve the 2D Cahn-Hilliard equation on a periodic domain using the ADI method // Outputs timing // Copyright 2019 Andrew Gloster // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // --------------------------------------------------------------------- // Standard Libraries and Headers // --------------------------------------------------------------------- #include <math.h> #include <stdlib.h> #include <stdio.h> #include "cuda.h" #include <cublas_v2.h> #include "hdf5.h" #include <time.h> // --------------------------------------------------------------------- // Programmer Libraries and Headers // --------------------------------------------------------------------- #include "../../../cuSten/cuSten.h" #include "cuPentBatch.h" #include "BatchHyper.h" // --------------------------------------------------------------------- // MACROS // --------------------------------------------------------------------- // Block sizes for finding RHS #define BLOCK_X_FUN 8 #define BLOCK_Y_FUN 8 #define BLOCK_X 32 #define BLOCK_Y 32 // Block size for inverting #define BLOCK_INV 64 //--------------------------------------------------------------------- // Static functions for use in main program //--------------------------------------------------------------------- // Find cBar for differencing __global__ static void findCBar(double* cOld, double* cCurr, double* cBar, int nx) { // Matrix index int globalIdx = blockDim.x * blockIdx.x + threadIdx.x; int globalIdy = blockDim.y * blockIdx.y + threadIdx.y; // Set index being computed int index = globalIdy * nx + globalIdx; // Find cBar cBar[index] = 2.0 * cCurr[index] - cOld[index]; } // Find the full combined RHS __global__ static void findRHS(double* cOld, double* cCurr, double* cHalf, double* cNonLinRHS, int nx) { // Matrix index int globalIdx = blockDim.x * blockIdx.x + threadIdx.x; int globalIdy = blockDim.y * blockIdx.y + threadIdx.y; // Set index being computed int index = globalIdy * nx + globalIdx; // Set the RHS for inversion cHalf[index] += - (2.0 / 3.0) * (cCurr[index] - cOld[index]) + cNonLinRHS[index]; // Set cOld to cCurr cOld[index] = cCurr[index]; } // Recover the updated timestep __global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx) { // Matrix index int globalIdx = blockDim.x * blockIdx.x + threadIdx.x; int globalIdy = blockDim.y * blockIdx.y + threadIdx.y; // Set index being computed int index = globalIdy * nx + globalIdx; // Recover the new data cCurr[index] = cBar[index] + cHalf[index]; } static double double_rand(double min, double max) { double scale = (double) rand() / (double) RAND_MAX; /* [0, 1.0] */ return min + scale * ( max - min ); /* [min, max] */ } //--------------------------------------------------------------------- // Function to calculate the non linear RHS //--------------------------------------------------------------------- /*! 
\var typedef double (*devArg1X)(double*, double*, int); \brief The function pointer containing the user defined function to be applied <br> Input 1: The pointer to input data to the function <br> Input 2: The pointer to the coefficients provided by the user <br> Input 3: The current index position (centre of the stencil to be applied) <br> Input 4: Value to be used to jump between rows. (j + 1, j - 1 etc.) <br> Input 5: Size of stencil in x direction <br> Input 6: Size of stencil in y direction */ typedef double (*devArg1XY)(double*, double*, int, int, int, int); __inline__ __device__ double nonLinRHS(double* data, double* coe, int loc, int jump, int nx, int ny) { double result = 0.0; double current; int temp; int count = 0; #pragma unroll for (int j = 0; j < ny; j++) { temp = loc + j * jump; #pragma unroll for (int i = 0; i < nx; i++) { current = data[temp + i]; result += coe[count] * ((current * current * current) - current); count ++; } } return result; } __device__ devArg1XY devFunc = nonLinRHS; // --------------------------------------------------------------------- // Begin main program // --------------------------------------------------------------------- int main(int argc, char *argv[]) { //---------------------------------------- // Simulation paramters //---------------------------------------- // Set coefficients double D = 1.0; double gamma = 0.01; // Set grid spacing -- Use a square grid -- thus all n = ny // Read from command line int nx; nx = atoi(argv[1]); // Set the size of the reduced matrix int size = nx - 2; // Set timing double T = 10.0; // Domain size double lx = 16.0 * M_PI; // Spacings double dx = lx / nx; double dt = 0.1 * dx; // Buffer used for error checking char msgStringBuffer[1024]; // What device to compute on int computeDevice = 0; //---------------------------------------- // Set up GPU grids //---------------------------------------- // Set for inversion int gridInv = (nx % BLOCK_INV == 0) ? (nx / BLOCK_INV) : (nx / BLOCK_INV + 1); dim3 blockDimInv(BLOCK_INV); dim3 gridDimInv(gridInv); // Set for any standard grid int xGrid = (nx % BLOCK_X == 0) ? (nx / BLOCK_X) : (nx / BLOCK_X + 1); int yGrid = (nx % BLOCK_Y == 0) ? 
(nx / BLOCK_Y) : (nx / BLOCK_Y + 1); dim3 blockDim(BLOCK_X, BLOCK_Y); dim3 gridDim(xGrid, yGrid); //---------------------------------------- // Memory allocation //---------------------------------------- // Old timestep double* cOld; cudaMallocManaged(&cOld, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cOld"); checkError(msgStringBuffer); // Current timestep double* cCurr; cudaMallocManaged(&cCurr, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cCurr"); checkError(msgStringBuffer); // New timestep double* cNonLinRHS; cudaMallocManaged(&cNonLinRHS, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cNonLinRHS"); checkError(msgStringBuffer); // Intermediate step double* cBar; cudaMallocManaged(&cBar, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cBar"); checkError(msgStringBuffer); // Intermediate step double* cHalf; cudaMallocManaged(&cHalf, nx * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for cBar"); checkError(msgStringBuffer); //---------------------------------------- // Initial Condition //---------------------------------------- // Indexing int temp, index; for (int j = 0; j < nx; j++) { temp = j * nx; for (int i = 0; i < nx; i++) { index = temp + i; cOld[index] = double_rand(- 0.1, 0.1); cCurr[index] = cOld[index]; } } //---------------------------------------- // Allocate the memory for the LHS //---------------------------------------- // Lowest diagonal double* ds; cudaMallocManaged(&ds, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for ds"); checkError(msgStringBuffer); // Lower diagonal double* dl; cudaMallocManaged(&dl, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for dl"); checkError(msgStringBuffer); // Main daigonal double* diag; cudaMallocManaged(&diag, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for diag"); checkError(msgStringBuffer); // Upper diagonal double* du; cudaMallocManaged(&du, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for du"); checkError(msgStringBuffer); // Highest diagonal double* dw; cudaMallocManaged(&dw, size * nx * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for dw"); checkError(msgStringBuffer); //---------------------------------------- // Set up cuBLAS //---------------------------------------- // Set a handle cublasHandle_t handleBLAS; // Set a status cublasStatus_t statusBLAS; // Create the handle statusBLAS = cublasCreate(&handleBLAS); // Set constants const double alpha = 1.0; const double beta = 0.0; //---------------------------------------- // Set coefficients //---------------------------------------- // Linear coefficient double simgaLin = 2.0 * dt * D * gamma / (3.0 * (pow(dx, 4.0))); // Set the diagonal elements double a = simgaLin; double b = - 4 * simgaLin; double c = 1 + 6 * simgaLin; double d = - 4 * simgaLin; double e = simgaLin; //---------------------------------------- // Set the matrix //---------------------------------------- // Set the LHS for inversion setMultiLHS<<<gridDim, blockDim>>>(ds, dl, diag, du, dw, a, b, c, d, e, size, nx); sprintf(msgStringBuffer, "Failed to set LHS matrix for initial timestep"); checkError(msgStringBuffer); // Ensure matrix is set cudaDeviceSynchronize(); // Pre-factor the LHS pentFactorBatch<<<gridDimInv, blockDimInv>>>(ds, dl, diag, du, dw, size, nx); 
sprintf(msgStringBuffer, "Failed to pre factor LHS matrix for initial timestep"); checkError(msgStringBuffer); // Ensure matrix is factorised cudaDeviceSynchronize(); //---------------------------------------- // Find omega and set inverses //---------------------------------------- double* omega = (double*)malloc(4 * sizeof(double)); if (omega == NULL) { printf("%s \n", "Failed to malloc omega"); } double* inv1Single = (double*)malloc(size * sizeof(double)); if (inv1Single == NULL) { printf("%s \n", "Failed to malloc inv1Single"); } double* inv2Single = (double*)malloc(size * sizeof(double)); if (inv2Single == NULL) { printf("%s \n", "Failed to malloc inv2Single"); } double* inv1Multi; cudaMallocManaged(&inv1Multi, nx * size * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for inv1Multi"); checkError(msgStringBuffer); double* inv2Multi; cudaMallocManaged(&inv2Multi, nx * size * sizeof(double)); sprintf(msgStringBuffer, "Failed to allocate memory for inv2Multi"); checkError(msgStringBuffer); findOmega(omega, inv1Single, inv2Single, a, b, c, d, e, nx); for (int j = 0; j < size; j++) { temp = j * nx; for (int i = 0; i < nx; i++) { index = temp + i; inv1Multi[index] = inv1Single[j]; inv2Multi[index] = inv2Single[j]; } } //---------------------------------------- // Set compute for linear RHS //---------------------------------------- int linHoriz = 5; int linLeft = 2; int linRight = 2; int linVert = 5; int linTop = 2; int linBottom = 2; double* weightsLinRHS; cudaMallocManaged(&weightsLinRHS, linHoriz * linVert * sizeof(double)); weightsLinRHS[0] = 0.0; weightsLinRHS[1] = 0.0; weightsLinRHS[2] = - 1.0 * simgaLin; weightsLinRHS[3] = 0.0; weightsLinRHS[4] = 0.0; weightsLinRHS[5] = 0.0; weightsLinRHS[6] = - 2.0 * simgaLin; weightsLinRHS[7] = 8.0 * simgaLin; weightsLinRHS[8] = - 2.0 * simgaLin; weightsLinRHS[9] = 0.0; weightsLinRHS[10] = - 1.0 * simgaLin; weightsLinRHS[11] = 8.0 * simgaLin; weightsLinRHS[12] = - 20.0 * simgaLin; weightsLinRHS[13] = 8.0 * simgaLin; weightsLinRHS[14] = - 1.0 * simgaLin; weightsLinRHS[15] = 0.0; weightsLinRHS[16] = - 2.0 * simgaLin; weightsLinRHS[17] = 8.0 * simgaLin; weightsLinRHS[18] = - 2.0 * simgaLin; weightsLinRHS[19] = 0.0; weightsLinRHS[20] = 0.0; weightsLinRHS[21] = 0.0; weightsLinRHS[22] = -1.0 * simgaLin; weightsLinRHS[23] = 0.0; weightsLinRHS[24] = 0.0; // Set up the compute device structs cuSten_t linRHS; // Set the number of tiles int linInitTiles = 1; // Initialise the instance of the stencil cuStenCreate2DXYp(&linRHS, computeDevice, linInitTiles, nx, nx, BLOCK_X, BLOCK_Y, cHalf, cBar, weightsLinRHS, linHoriz, linLeft, linRight, linVert, linTop, linBottom); // Ensure compute type created cudaDeviceSynchronize(); //---------------------------------------- // Set up computation of non-linear RHS //---------------------------------------- // Set up the compute device structs cuSten_t nonLinCompute; // Synchronise to ensure everything initialised cudaDeviceSynchronize(); // Copy the function to device memory double* func; cudaMemcpyFromSymbol(&func, devFunc, sizeof(devArg1XY)); // Set new non linear coefficient double sigmaNonLin = (dt / 3.0) * D * (2.0 / pow(dx, 2.0)); int numStenHoriz = 3; int numStenLeft = 1; int numStenRight = 1; int numStenVert = 3; int numStenTop = 1; int numStenBottom = 1; int nonLinTiles = 1; double* coe; cudaMallocManaged(&coe, numStenHoriz * numStenVert * sizeof(double)); coe[0] = 0.0; coe[1] = 1.0 * sigmaNonLin; coe[2] = 0.0; coe[3] = 1.0 * sigmaNonLin; coe[4] = - 4.0 * sigmaNonLin; coe[5] = 1.0 * 
sigmaNonLin; coe[6] = 0.0; coe[7] = 1.0 * sigmaNonLin; coe[8] = 0.0; // Initialise the instance of the stencil cuStenCreate2DXYpFun(&nonLinCompute, computeDevice, nonLinTiles, nx, nx, BLOCK_X_FUN, BLOCK_Y_FUN, cNonLinRHS, cCurr, coe, numStenHoriz, numStenLeft, numStenRight, numStenVert, numStenTop, numStenBottom, func); // Synchronise to ensure everything initialised cudaDeviceSynchronize(); //---------------------------------------- // Begin timestepping //---------------------------------------- // Track current time-step double t = 0.0; // Define events for timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Store time float time; // Start time cudaEventRecord(start, 0 ); while (t < T) { // Set cBar findCBar<<<gridDim, blockDim>>>(cOld, cCurr, cBar, nx); // Ensure compute type created cudaDeviceSynchronize(); // Compute the non-linear RHS cuStenCompute2DXYpFun(&nonLinCompute, 0); // Compute the linear RHS cuStenCompute2DXYp(&linRHS, 0); // Ensure compute type created cudaDeviceSynchronize(); // Find the full RHS and then set cOld to cCurrent findRHS<<<gridDim, blockDim>>>(cOld, cCurr, cHalf, cNonLinRHS, nx); // Ensure compute type created cudaDeviceSynchronize(); // Transpose the result statusBLAS = cublasDgeam(handleBLAS, CUBLAS_OP_T, CUBLAS_OP_T, nx, nx, &alpha, cHalf, nx, &beta, NULL, nx, cCurr, nx); if (statusBLAS != CUBLAS_STATUS_SUCCESS) { printf("Unable to compute transpose \n"); return EXIT_FAILURE; } // Ensure transpose completed cudaDeviceSynchronize(); // Invert the matrix cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cCurr, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx); // Transpose the result statusBLAS = cublasDgeam(handleBLAS, CUBLAS_OP_T, CUBLAS_OP_T, nx, nx, &alpha, cCurr, nx, &beta, NULL, nx, cHalf, nx); if (statusBLAS != CUBLAS_STATUS_SUCCESS) { printf("Unable to compute transpose \n"); return EXIT_FAILURE; } // Ensure transpose completed cudaDeviceSynchronize(); // Invert the matrix cyclicInv(ds, dl, diag, du, dw, inv1Multi, inv2Multi, omega, cHalf, a, b, d, e, BLOCK_INV, BLOCK_X, BLOCK_Y, size, nx); // Ensure computation completed cudaDeviceSynchronize(); findNew<<<gridDim, blockDim>>>(cCurr, cBar, cHalf, nx); // Ensure computation completed cudaDeviceSynchronize(); // Add on the next time t += dt; } // Ensure computation completed cudaDeviceSynchronize(); // End time cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Get elapsed time for kernel execution cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("%f \n", time / 1000); //---------------------------------------- // Free memory at the end //---------------------------------------- free(omega); free(inv1Single); free(inv2Single); cuStenDestroy2DXYp(&linRHS); cuStenDestroy2DXYpFun(&nonLinCompute); cudaFree(inv1Multi); cudaFree(inv2Multi); cudaFree(cOld); cudaFree(cNonLinRHS); cudaFree(cBar); cudaFree(cHalf); cudaFree(ds); cudaFree(dl); cudaFree(diag); cudaFree(du); cudaFree(dw); } // --------------------------------------------------------------------- // End main program // --------------------------------------------------------------------- // --------------------------------------------------------------------- // End of File // ---------------------------------------------------------------------
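A minimal standalone sketch of the out-of-place transpose pattern used in the ADI sweeps above: cublasDgeam is called with beta = 0 and a NULL B operand so that only alpha * transpose(A) is written to the output buffer. The helper name, the square size N, and the caller-provided buffers below are illustrative assumptions, not values taken from the solver itself.

#include <cublas_v2.h>
#include <cuda_runtime.h>

// Sketch: out = in^T for a dense N x N matrix of doubles, mirroring the
// cublasDgeam usage in the timestepping loop above (beta = 0, B = NULL).
void transposeWithGeam(const double *in, double *out, int N)
{
    cublasHandle_t handle;
    cublasCreate(&handle);

    const double alpha = 1.0;   // keep the data unscaled
    const double beta  = 0.0;   // B is not referenced when beta == 0, so NULL is allowed

    // C = alpha * op(A) + beta * op(B); with op = transpose and beta = 0 this
    // is a plain out-of-place transpose (cuBLAS treats matrices as column-major,
    // which is immaterial for a square transpose like this one).
    cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T,
                N, N,
                &alpha, in, N,
                &beta,  NULL, N,
                out, N);

    cudaDeviceSynchronize();    // make the result visible to the host
    cublasDestroy(handle);
}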
d6162093d40ebbf55ad57fdfc17cbc6bf19b2170.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/DeviceHashTable.cuh" #include "../src/HashFunc.cuh" #include "../util/util.cuh" #include <stdio.h> __device__ void DeviceHashTable::init(const DHTInitBlock &init_blk) { _bkts_p = init_blk.bkts_p; if (init_blk.alloc_p != nullptr) _alloc_p = init_blk.alloc_p; else { _alloc_p = nullptr; } _bkt_num = init_blk.bkt_num; } __device__ void DeviceHashTable::freeBucket(size_type bkt_no) { getBucketPtr(bkt_no)->free(_alloc_p); } __device__ void DeviceHashTable::initBucket(size_type bkt_no, const DHTInitBlock &init_blk) { getBucketPtr(bkt_no)->init(_alloc_p, init_blk.bkt_size, init_blk.max_key_size, init_blk.max_elem_size); } __device__ DeviceHashTable::dhb * DeviceHashTable::getBucketPtr(size_type bkt_no) { return (_bkts_p + bkt_no); } __device__ void DeviceHashTable::setup (uint32_t *nums, unsigned char **ptrs) { _bkt_cnt = nums[0]; _bkt_elem_cnt = nums[1]; _max_key_size = nums[2]; _max_elem_size = nums[2] + nums[3]; _data_ptr = ptrs[2]; _elem_info_ptr = reinterpret_cast<uint32_t *>(ptrs[1]); _bkt_info_ptr = reinterpret_cast<uint32_t *>(ptrs[0]); _data_info_ptr = reinterpret_cast<uint32_t *>(ptrs[3]); } // Lookup functions __device__ uint32_t DeviceHashTable::memorySize() const { return (sizeof(DeviceHashTable) + (_bkt_cnt + 1) * sizeof(uint32_t) + (_max_elem_size + OVERFLOW_COUNT) * 2 * sizeof(uint32_t) + (_max_elem_size + OVERFLOW_COUNT) * (_max_elem_size)); } __device__ uint32_t DeviceHashTable::maxElementCount() const { return ( (_bkt_cnt) * (_bkt_elem_cnt) ); } __device__ uint32_t DeviceHashTable::maxKeySize() const { return (_max_key_size); } __device__ uint32_t DeviceHashTable::maxValueSize() const { return ( _max_elem_size - _max_key_size ); } __device__ uint32_t DeviceHashTable::bucketCount() const { return (_bkt_num); } __device__ void * DeviceHashTable::bucketInfoAddress() const { return reinterpret_cast<void *>(_bkt_info_ptr); } __device__ void * DeviceHashTable::elementInfoAddress() const { return reinterpret_cast<void *>(_elem_info_ptr); } __device__ void * DeviceHashTable::dataAddress() const { return reinterpret_cast<void *>(_data_ptr); } __device__ IstRet DeviceHashTable::insert(const DeviceDataBlock &key, const DeviceDataBlock &value) { // printf("HELLO, %d\n", *(uint32_t*)(key.data)); size_type bkt_no = __hash_func1(key.data, key.size) % _bkt_num; dhb *bkt_info = getBucketPtr(bkt_no); // printf("inst: key: %d, bkt_no: %d, bkt_addr: %d\n", *(uint32_t*)(key.data), bkt_no, (uint32_t)bkt_info); status_type *stat_p; unsigned char *data_p; size_type *size_p; IstRet ret = IstRet::SUCCESSUL; uint32_t dst = atomicAdd(&(bkt_info->_size), 1); if (dst > bkt_info->_capacity) { while (atomicOr(&bkt_info->_capacity, 0) < dst) { for (int i = 0; i < 10000000; i++) ; } } if (dst == bkt_info->_capacity) { ret = IstRet::OVERFLOWED; // ready to reallocate uint32_t counter = bkt_info->_capacity; uint32_t cap = bkt_info->_capacity; uint32_t k = 0; // The first two steps aim to make sure no threads are acquiring data from the // dynamic memory area, because if they read when the reallocation is processing // it will cause serious problems // First, wait all writes are done // TODO: this is silly, it will be changed while (counter != 0) { if (atomicCAS(bkt_info->getStatusPtr(k), VALID, OCCUPIED) == VALID) counter--; k = (k + 1) % cap; } // Second, wait all reads are gone while (atomicCAS(&(bkt_info->_read_num), 0, -99999) != 0) ; // Third, it is the time to reallocate // it will set all status to 
VALID and set _read_num to 0 again // and it will set the _capacity a new value, so that other waiting // threads can continue to insert bkt_info->reallocate(_alloc_p); } // now we can assure the dst < capacity and we do the insert if (dst < bkt_info->_capacity) { // it can now write stat_p = bkt_info->getStatusPtr(dst); data_p = bkt_info->getDataPtr(dst); size_p = bkt_info->getKeySizePtr(dst); uint32_t r; if ((r = atomicCAS(stat_p, EMPTY, OCCUPIED)) != EMPTY) { return IstRet::UNKNOWN; } *size_p = key.size; *(size_p + 1) = value.size; memcpy(data_p, key.data, key.size); memcpy(data_p + bkt_info->_max_key_size, value.data, value.size); if (atomicCAS(stat_p, OCCUPIED, VALID) != OCCUPIED) { return IstRet::UNKNOWN; } } return ret; } __device__ void DeviceHashTable::find(const DeviceDataBlock &key, DeviceDataBlock &value) { size_type bkt_no = __hash_func1(key.data, key.size) % _bkt_num; dhb *bkt_p = getBucketPtr(bkt_no); // printf("find: key: %d, bkt_no: %d, bkt_addr: %d\n", *(uint32_t*)(key.data), bkt_no, (uint32_t)bkt_p); size_type *stat_p, *size_p; unsigned char *data_p; size_type size; int i; // if the bucket is being reallocated, wait until the process is done while (atomicInc(&bkt_p->_read_num, 0) < 0) ; size = bkt_p->_size; // now the dynamic zone is safe to read for (i = 0; i < size; i++) { stat_p = bkt_p->getStatusPtr(i); size_p = bkt_p->getKeySizePtr(i); data_p = bkt_p->getDataPtr(i); uint32_t ret; // wait until the data is properly written while (atomicInc(stat_p, VALID) < VALID) // add *stat_p if *stat_p >= VALID ; if (key.size == *size_p && datacmp(reinterpret_cast<unsigned char*>(key.data), data_p, key.size) == 0) break; atomicSub(stat_p, 1); } if (i < size) { // found value.size = *(size_p + 1); // printf("%d, %d\n", *(uint32_t*)(key.data), *(uint32_t*)(data_p + _max_key_size)); memcpy(value.data, data_p + bkt_p->_max_key_size, value.size); atomicSub(stat_p, 1); } else { value.size = 0; } atomicSub(&bkt_p->_read_num, 1); } __device__ typename DeviceHashTable::size_type * DeviceHashTable::getBktCntAddr(size_type bkt_no) { return ( &getBucketPtr(bkt_no)->_size ); } __device__ typename DeviceHashTable::size_type * DeviceHashTable::getKeySzAddr(size_type bkt_no, size_type dst) { return ( _elem_info_ptr + bkt_no * _bkt_elem_cnt * 2 + dst * 2 ); } __device__ unsigned char * DeviceHashTable::getDataAddr(size_type bkt_no, size_type dst) { return ( _data_ptr + (bkt_no * _bkt_elem_cnt + dst) * _max_elem_size ); } __device__ DeviceHashTable::status_type * DeviceHashTable::getStatusAddr(size_type bkt_no, size_type dst) { return ( _data_info_ptr + (bkt_no * _bkt_elem_cnt + dst) ); } __device__ DeviceAllocator * DeviceHashTable::getAllocatorPtr() const { return _alloc_p; } __global__ void initDHTKernel(DeviceHashTable *dht, DHTInitBlock init_blk) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = blockDim.x * gridDim.x; // I don't know why, but the hipMalloc in the bucket init function only works if these five lines of // nonsense code exists // ----------------------------------- // if (tid == 0) { // void *ptr; // int k = hipMalloc((void**)&ptr, 32); // hipFree(ptr); // } // ----------------------------------- if (tid == 0) { dht->init(init_blk); } __syncthreads(); // the alloc pointer should be ready while (tid < init_blk.bkt_num) { // init_blk.bkts_p[tid].init(dht->getAllocatorPtr(), init_blk.bkt_size, init_blk.max_key_size, init_blk.max_elem_size); dht->initBucket(tid, init_blk); tid += stride; } } __global__ void freeDHTKernel(DeviceHashTable *dht) { uint32_t tid = 
blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = blockDim.x * gridDim.x; uint32_t n = dht->bucketCount(); uint32_t bkt_no = tid; while (bkt_no < n) { dht->freeBucket(bkt_no); bkt_no += stride; } } __host__ void createDeviceHashTable( DeviceHashTable *&dht, uint32_t max_elem_cnt, uint32_t bkt_cnt, uint32_t max_key_size, uint32_t max_val_size, DeviceAllocator *alloc_p) { uint32_t bkt_size = max_elem_cnt / bkt_cnt; size_t temp; uint32_t total_size = sizeof(DeviceHashTable) + bkt_cnt * sizeof(DeviceHashBucket); HANDLE_ERROR(hipMalloc((void**)&dht, total_size)); size_t total_bkt_size = bkt_cnt * bkt_size * (max_key_size + max_val_size + 3 * sizeof(uint32_t)); // printf("%d\n", total_bkt_size); hipThreadGetLimit(&temp, hipLimitMallocHeapSize); if (total_bkt_size * 8 > temp) { HANDLE_ERROR( hipThreadSetLimit(hipLimitMallocHeapSize, total_bkt_size * 8) ); hipThreadGetLimit(&temp, hipLimitMallocHeapSize); // printf("actual: %d\n", temp); } unsigned char *ptr = reinterpret_cast<unsigned char *>(dht); ptr += sizeof(DeviceHashTable); DHTInitBlock dib { reinterpret_cast<DeviceHashBucket *>(ptr), alloc_p, bkt_cnt, bkt_size, max_key_size, max_key_size + max_val_size }; hipLaunchKernelGGL(( initDHTKernel), dim3(4), dim3(64), 0, 0, dht, dib); HANDLE_ERROR(hipDeviceSynchronize()); } __host__ void destroyDeviceHashTable(DeviceHashTable *dht) { hipLaunchKernelGGL(( freeDHTKernel), dim3(4), dim3(64), 0, 0, dht); hipDeviceSynchronize(); hipFree(dht); } __global__ void setupKernel(DeviceHashTable *dht, uint32_t *nums, unsigned char **ptrs) { dht->setup(nums, ptrs); } __global__ void getInfoKernel(DeviceHashTable *dht, uint32_t *output, void **output_ptrs) { output[0] = dht->memorySize(); output[1] = dht->maxElementCount(); output[2] = dht->maxKeySize(); output[3] = dht->maxValueSize(); output[4] = dht->bucketCount(); output_ptrs[0] = dht->bucketInfoAddress(); output_ptrs[1] = dht->elementInfoAddress(); output_ptrs[2] = dht->dataAddress(); } __global__ void insertKernel(DeviceHashTable *dht, DeviceHashTableInsertBlock buf) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = gridDim.x * blockDim.x; DeviceDataBlock key_blk, val_blk; IstRet ret; while (tid < buf.len) { key_blk.data = buf.key_buf + tid * buf.max_key_size; key_blk.size = buf.key_size_buf[tid]; val_blk.data = buf.val_buf + tid * buf.max_val_size; val_blk.size = buf.val_size_buf[tid]; ret = dht->insert(key_blk, val_blk); if (buf.ret_buf != nullptr) buf.ret_buf[tid] = ret; tid += stride; } } __global__ void findKernel(DeviceHashTable *dht, DeviceHashTableFindBlock buf) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = gridDim.x * blockDim.x; DeviceDataBlock key_blk, val_blk; while (tid < buf.len) { key_blk.data = buf.key_buf + tid * buf.max_key_size; key_blk.size = buf.key_size_buf[tid]; val_blk.data = buf.val_buf + tid * buf.max_val_size; dht->find(key_blk, val_blk); // value data is already copied to output buffer buf.val_size_buf[tid] = val_blk.size; // if not found, this size is 0, the user shall know. tid += stride; } }
d6162093d40ebbf55ad57fdfc17cbc6bf19b2170.cu
#include "../include/DeviceHashTable.cuh" #include "../src/HashFunc.cuh" #include "../util/util.cuh" #include <stdio.h> __device__ void DeviceHashTable::init(const DHTInitBlock &init_blk) { _bkts_p = init_blk.bkts_p; if (init_blk.alloc_p != nullptr) _alloc_p = init_blk.alloc_p; else { _alloc_p = nullptr; } _bkt_num = init_blk.bkt_num; } __device__ void DeviceHashTable::freeBucket(size_type bkt_no) { getBucketPtr(bkt_no)->free(_alloc_p); } __device__ void DeviceHashTable::initBucket(size_type bkt_no, const DHTInitBlock &init_blk) { getBucketPtr(bkt_no)->init(_alloc_p, init_blk.bkt_size, init_blk.max_key_size, init_blk.max_elem_size); } __device__ DeviceHashTable::dhb * DeviceHashTable::getBucketPtr(size_type bkt_no) { return (_bkts_p + bkt_no); } __device__ void DeviceHashTable::setup (uint32_t *nums, unsigned char **ptrs) { _bkt_cnt = nums[0]; _bkt_elem_cnt = nums[1]; _max_key_size = nums[2]; _max_elem_size = nums[2] + nums[3]; _data_ptr = ptrs[2]; _elem_info_ptr = reinterpret_cast<uint32_t *>(ptrs[1]); _bkt_info_ptr = reinterpret_cast<uint32_t *>(ptrs[0]); _data_info_ptr = reinterpret_cast<uint32_t *>(ptrs[3]); } // Lookup functions __device__ uint32_t DeviceHashTable::memorySize() const { return (sizeof(DeviceHashTable) + (_bkt_cnt + 1) * sizeof(uint32_t) + (_max_elem_size + OVERFLOW_COUNT) * 2 * sizeof(uint32_t) + (_max_elem_size + OVERFLOW_COUNT) * (_max_elem_size)); } __device__ uint32_t DeviceHashTable::maxElementCount() const { return ( (_bkt_cnt) * (_bkt_elem_cnt) ); } __device__ uint32_t DeviceHashTable::maxKeySize() const { return (_max_key_size); } __device__ uint32_t DeviceHashTable::maxValueSize() const { return ( _max_elem_size - _max_key_size ); } __device__ uint32_t DeviceHashTable::bucketCount() const { return (_bkt_num); } __device__ void * DeviceHashTable::bucketInfoAddress() const { return reinterpret_cast<void *>(_bkt_info_ptr); } __device__ void * DeviceHashTable::elementInfoAddress() const { return reinterpret_cast<void *>(_elem_info_ptr); } __device__ void * DeviceHashTable::dataAddress() const { return reinterpret_cast<void *>(_data_ptr); } __device__ IstRet DeviceHashTable::insert(const DeviceDataBlock &key, const DeviceDataBlock &value) { // printf("HELLO, %d\n", *(uint32_t*)(key.data)); size_type bkt_no = __hash_func1(key.data, key.size) % _bkt_num; dhb *bkt_info = getBucketPtr(bkt_no); // printf("inst: key: %d, bkt_no: %d, bkt_addr: %d\n", *(uint32_t*)(key.data), bkt_no, (uint32_t)bkt_info); status_type *stat_p; unsigned char *data_p; size_type *size_p; IstRet ret = IstRet::SUCCESSUL; uint32_t dst = atomicAdd(&(bkt_info->_size), 1); if (dst > bkt_info->_capacity) { while (atomicOr(&bkt_info->_capacity, 0) < dst) { for (int i = 0; i < 10000000; i++) ; } } if (dst == bkt_info->_capacity) { ret = IstRet::OVERFLOWED; // ready to reallocate uint32_t counter = bkt_info->_capacity; uint32_t cap = bkt_info->_capacity; uint32_t k = 0; // The first two steps aim to make sure no threads are acquiring data from the // dynamic memory area, because if they read when the reallocation is processing // it will cause serious problems // First, wait all writes are done // TODO: this is silly, it will be changed while (counter != 0) { if (atomicCAS(bkt_info->getStatusPtr(k), VALID, OCCUPIED) == VALID) counter--; k = (k + 1) % cap; } // Second, wait all reads are gone while (atomicCAS(&(bkt_info->_read_num), 0, -99999) != 0) ; // Third, it is the time to reallocate // it will set all status to VALID and set _read_num to 0 again // and it will set the _capacity a new value, so that 
other waiting // threads can continue to insert bkt_info->reallocate(_alloc_p); } // now we can assure the dst < capacity and we do the insert if (dst < bkt_info->_capacity) { // it can now write stat_p = bkt_info->getStatusPtr(dst); data_p = bkt_info->getDataPtr(dst); size_p = bkt_info->getKeySizePtr(dst); uint32_t r; if ((r = atomicCAS(stat_p, EMPTY, OCCUPIED)) != EMPTY) { return IstRet::UNKNOWN; } *size_p = key.size; *(size_p + 1) = value.size; memcpy(data_p, key.data, key.size); memcpy(data_p + bkt_info->_max_key_size, value.data, value.size); if (atomicCAS(stat_p, OCCUPIED, VALID) != OCCUPIED) { return IstRet::UNKNOWN; } } return ret; } __device__ void DeviceHashTable::find(const DeviceDataBlock &key, DeviceDataBlock &value) { size_type bkt_no = __hash_func1(key.data, key.size) % _bkt_num; dhb *bkt_p = getBucketPtr(bkt_no); // printf("find: key: %d, bkt_no: %d, bkt_addr: %d\n", *(uint32_t*)(key.data), bkt_no, (uint32_t)bkt_p); size_type *stat_p, *size_p; unsigned char *data_p; size_type size; int i; // if the bucket is being reallocated, wait until the process is done while (atomicInc(&bkt_p->_read_num, 0) < 0) ; size = bkt_p->_size; // now the dynamic zone is safe to read for (i = 0; i < size; i++) { stat_p = bkt_p->getStatusPtr(i); size_p = bkt_p->getKeySizePtr(i); data_p = bkt_p->getDataPtr(i); uint32_t ret; // wait until the data is properly written while (atomicInc(stat_p, VALID) < VALID) // add *stat_p if *stat_p >= VALID ; if (key.size == *size_p && datacmp(reinterpret_cast<unsigned char*>(key.data), data_p, key.size) == 0) break; atomicSub(stat_p, 1); } if (i < size) { // found value.size = *(size_p + 1); // printf("%d, %d\n", *(uint32_t*)(key.data), *(uint32_t*)(data_p + _max_key_size)); memcpy(value.data, data_p + bkt_p->_max_key_size, value.size); atomicSub(stat_p, 1); } else { value.size = 0; } atomicSub(&bkt_p->_read_num, 1); } __device__ typename DeviceHashTable::size_type * DeviceHashTable::getBktCntAddr(size_type bkt_no) { return ( &getBucketPtr(bkt_no)->_size ); } __device__ typename DeviceHashTable::size_type * DeviceHashTable::getKeySzAddr(size_type bkt_no, size_type dst) { return ( _elem_info_ptr + bkt_no * _bkt_elem_cnt * 2 + dst * 2 ); } __device__ unsigned char * DeviceHashTable::getDataAddr(size_type bkt_no, size_type dst) { return ( _data_ptr + (bkt_no * _bkt_elem_cnt + dst) * _max_elem_size ); } __device__ DeviceHashTable::status_type * DeviceHashTable::getStatusAddr(size_type bkt_no, size_type dst) { return ( _data_info_ptr + (bkt_no * _bkt_elem_cnt + dst) ); } __device__ DeviceAllocator * DeviceHashTable::getAllocatorPtr() const { return _alloc_p; } __global__ void initDHTKernel(DeviceHashTable *dht, DHTInitBlock init_blk) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = blockDim.x * gridDim.x; // I don't know why, but the cudaMalloc in the bucket init function only works if these five lines of // nonsense code exists // ----------------------------------- // if (tid == 0) { // void *ptr; // int k = cudaMalloc((void**)&ptr, 32); // cudaFree(ptr); // } // ----------------------------------- if (tid == 0) { dht->init(init_blk); } __syncthreads(); // the alloc pointer should be ready while (tid < init_blk.bkt_num) { // init_blk.bkts_p[tid].init(dht->getAllocatorPtr(), init_blk.bkt_size, init_blk.max_key_size, init_blk.max_elem_size); dht->initBucket(tid, init_blk); tid += stride; } } __global__ void freeDHTKernel(DeviceHashTable *dht) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = blockDim.x * gridDim.x; 
uint32_t n = dht->bucketCount(); uint32_t bkt_no = tid; while (bkt_no < n) { dht->freeBucket(bkt_no); bkt_no += stride; } } __host__ void createDeviceHashTable( DeviceHashTable *&dht, uint32_t max_elem_cnt, uint32_t bkt_cnt, uint32_t max_key_size, uint32_t max_val_size, DeviceAllocator *alloc_p) { uint32_t bkt_size = max_elem_cnt / bkt_cnt; size_t temp; uint32_t total_size = sizeof(DeviceHashTable) + bkt_cnt * sizeof(DeviceHashBucket); HANDLE_ERROR(cudaMalloc((void**)&dht, total_size)); size_t total_bkt_size = bkt_cnt * bkt_size * (max_key_size + max_val_size + 3 * sizeof(uint32_t)); // printf("%d\n", total_bkt_size); cudaThreadGetLimit(&temp, cudaLimitMallocHeapSize); if (total_bkt_size * 8 > temp) { HANDLE_ERROR( cudaThreadSetLimit(cudaLimitMallocHeapSize, total_bkt_size * 8) ); cudaThreadGetLimit(&temp, cudaLimitMallocHeapSize); // printf("actual: %d\n", temp); } unsigned char *ptr = reinterpret_cast<unsigned char *>(dht); ptr += sizeof(DeviceHashTable); DHTInitBlock dib { reinterpret_cast<DeviceHashBucket *>(ptr), alloc_p, bkt_cnt, bkt_size, max_key_size, max_key_size + max_val_size }; initDHTKernel<<<4, 64>>>(dht, dib); HANDLE_ERROR(cudaDeviceSynchronize()); } __host__ void destroyDeviceHashTable(DeviceHashTable *dht) { freeDHTKernel<<<4, 64>>>(dht); cudaDeviceSynchronize(); cudaFree(dht); } __global__ void setupKernel(DeviceHashTable *dht, uint32_t *nums, unsigned char **ptrs) { dht->setup(nums, ptrs); } __global__ void getInfoKernel(DeviceHashTable *dht, uint32_t *output, void **output_ptrs) { output[0] = dht->memorySize(); output[1] = dht->maxElementCount(); output[2] = dht->maxKeySize(); output[3] = dht->maxValueSize(); output[4] = dht->bucketCount(); output_ptrs[0] = dht->bucketInfoAddress(); output_ptrs[1] = dht->elementInfoAddress(); output_ptrs[2] = dht->dataAddress(); } __global__ void insertKernel(DeviceHashTable *dht, DeviceHashTableInsertBlock buf) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = gridDim.x * blockDim.x; DeviceDataBlock key_blk, val_blk; IstRet ret; while (tid < buf.len) { key_blk.data = buf.key_buf + tid * buf.max_key_size; key_blk.size = buf.key_size_buf[tid]; val_blk.data = buf.val_buf + tid * buf.max_val_size; val_blk.size = buf.val_size_buf[tid]; ret = dht->insert(key_blk, val_blk); if (buf.ret_buf != nullptr) buf.ret_buf[tid] = ret; tid += stride; } } __global__ void findKernel(DeviceHashTable *dht, DeviceHashTableFindBlock buf) { uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x; uint32_t stride = gridDim.x * blockDim.x; DeviceDataBlock key_blk, val_blk; while (tid < buf.len) { key_blk.data = buf.key_buf + tid * buf.max_key_size; key_blk.size = buf.key_size_buf[tid]; val_blk.data = buf.val_buf + tid * buf.max_val_size; dht->find(key_blk, val_blk); // value data is already copied to output buffer buf.val_size_buf[tid] = val_blk.size; // if not found, this size is 0, the user shall know. tid += stride; } }
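For context, a host-side driver for the hash-table kernels above might look like the sketch below. The field names of DeviceHashTableInsertBlock and DeviceHashTableFindBlock are inferred from how insertKernel and findKernel read them, but their full definitions live in DeviceHashTable.cuh, so the struct usage, the null allocator, the launch configuration, and the 32-bit keys and values are all assumptions made only for illustration.

#include "../include/DeviceHashTable.cuh"   // same include path as the file above
#include <cuda_runtime.h>
#include <cstdint>

// Hypothetical driver: insert n 32-bit key/value pairs, then look them up again.
void hashTableRoundTrip()
{
    const uint32_t n            = 1024;               // pair count (assumption)
    const uint32_t max_key_size = sizeof(uint32_t);
    const uint32_t max_val_size = sizeof(uint32_t);

    DeviceHashTable *dht = nullptr;
    // Null allocator: the init path above stores nullptr and defers to the buckets.
    createDeviceHashTable(dht, 4 * n, 64, max_key_size, max_val_size, nullptr);

    unsigned char *keys, *vals;
    uint32_t *key_sizes, *val_sizes;
    cudaMallocManaged(&keys,      n * max_key_size);
    cudaMallocManaged(&vals,      n * max_val_size);
    cudaMallocManaged(&key_sizes, n * sizeof(uint32_t));
    cudaMallocManaged(&val_sizes, n * sizeof(uint32_t));

    for (uint32_t i = 0; i < n; ++i) {
        reinterpret_cast<uint32_t *>(keys)[i] = i;      // key   = i
        reinterpret_cast<uint32_t *>(vals)[i] = 2 * i;  // value = 2 * i
        key_sizes[i] = max_key_size;
        val_sizes[i] = max_val_size;
    }

    DeviceHashTableInsertBlock ins;                     // field names as read by insertKernel
    ins.key_buf      = keys;       ins.key_size_buf = key_sizes;
    ins.val_buf      = vals;       ins.val_size_buf = val_sizes;
    ins.max_key_size = max_key_size;
    ins.max_val_size = max_val_size;
    ins.len          = n;
    ins.ret_buf      = nullptr;                         // per-element IstRet codes are optional
    insertKernel<<<4, 64>>>(dht, ins);
    cudaDeviceSynchronize();

    DeviceHashTableFindBlock fnd;                       // field names as read by findKernel
    fnd.key_buf      = keys;       fnd.key_size_buf = key_sizes;
    fnd.val_buf      = vals;       fnd.val_size_buf = val_sizes;  // find() rewrites these
    fnd.max_key_size = max_key_size;
    fnd.max_val_size = max_val_size;
    fnd.len          = n;
    findKernel<<<4, 64>>>(dht, fnd);
    cudaDeviceSynchronize();

    destroyDeviceHashTable(dht);
    cudaFree(keys);  cudaFree(vals);
    cudaFree(key_sizes);  cudaFree(val_sizes);
}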
54ab1802418f245a390f51bf8be77e213b3d89b9.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Helper functions for mapping CUTLASS concepts to cuBLAS. 
*/ #include <stdexcept> #if CUTLASS_ENABLE_CUBLAS #include "cublas_helpers.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Converts a cuBLAS status to cutlass::Status Status get_cutlass_status(hipblasStatus_t cublas) { switch (cublas) { case HIPBLAS_STATUS_SUCCESS: return Status::kSuccess; case HIPBLAS_STATUS_INVALID_VALUE: return Status::kErrorInvalidProblem; case HIPBLAS_STATUS_NOT_SUPPORTED: return Status::kErrorNotSupported; default: break; } return Status::kErrorInternal; } /// Converts a cuBLAS status to cutlass::profiler::Disposition Disposition get_cutlass_disposition(hipblasStatus_t cublas_status) { if (cublas_status == HIPBLAS_STATUS_INVALID_VALUE) { return Disposition::kInvalidProblem; } else if (cublas_status == HIPBLAS_STATUS_NOT_SUPPORTED) { return Disposition::kNotSupported; } return Disposition::kFailed; } /// Maps a CUTLASS tensor layout to a cuBLAS transpose operation bool get_cublas_transpose_operation( hipblasOperation_t &operation, library::LayoutTypeID layout, library::ComplexTransform transform) { switch (layout) { case library::LayoutTypeID::kColumnMajor: if (transform == library::ComplexTransform::kNone) { operation = HIPBLAS_OP_N; return true; } else { return false; } break; case library::LayoutTypeID::kRowMajor: if (transform == library::ComplexTransform::kNone) { operation = HIPBLAS_OP_T; return true; } else if (transform == library::ComplexTransform::kConjugate) { operation = HIPBLAS_OP_C; return true; } break; default: break; } return false; } /// Maps a CUTLASS numeric type to a cuBLAS data type enumeration bool get_cublas_datatype(hipDataType &data_type, library::NumericTypeID element_type) { switch (element_type) { case library::NumericTypeID::kFE4M3: #if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) data_type = CUDA_R_8F_E4M3; return true; #endif break; case library::NumericTypeID::kFE5M2: #if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) data_type = CUDA_R_8F_E5M2; return true; #endif break; case library::NumericTypeID::kF16: data_type = HIP_R_16F; return true; case library::NumericTypeID::kBF16: data_type = CUDA_R_16BF; return true; case library::NumericTypeID::kTF32: break; case library::NumericTypeID::kF32: data_type = HIP_R_32F; return true; case library::NumericTypeID::kF64: data_type = HIP_R_64F; return true; case library::NumericTypeID::kS4: break; case library::NumericTypeID::kS8: data_type = HIP_R_8I; return true; case library::NumericTypeID::kS16: break; case library::NumericTypeID::kS32: data_type = HIP_R_32I; return true; case library::NumericTypeID::kS64: break; case library::NumericTypeID::kU4: break; case library::NumericTypeID::kU8: data_type = HIP_R_8U; return true; case library::NumericTypeID::kU16: break; case library::NumericTypeID::kU32: data_type = HIP_R_32U; return true; case library::NumericTypeID::kU64: break; case library::NumericTypeID::kB1: break; case library::NumericTypeID::kCF32: data_type = HIP_C_32F; return true; case library::NumericTypeID::kCF64: data_type = HIP_C_64F; return true; case library::NumericTypeID::kInvalid: default: break; } return false; } /// Maps a cutlass::SideMode to cuBLAS side mode bool get_cublas_side_mode(hipblasSideMode_t& side, SideMode side_mode) { switch (side_mode) { case SideMode::kLeft: side = HIPBLAS_SIDE_LEFT; return true; case SideMode::kRight: side = HIPBLAS_SIDE_RIGHT; return true; default: break; 
} return false; } /// Maps a cutlass::FillMode to cuBLAS fill mode bool get_cublas_fill_mode(hipblasFillMode_t& uplo, FillMode fill_mode) { switch (fill_mode) { case FillMode::kLower: uplo = HIPBLAS_FILL_MODE_LOWER; return true; case FillMode::kUpper: uplo = HIPBLAS_FILL_MODE_UPPER; return true; default: break; } return false; } /// Maps a cutlass::DiagType to cuBLAS diag type bool get_cublas_diag_type(hipblasDiagType_t& diag, DiagType diag_type) { switch (diag_type) { case DiagType::kNonUnit: diag = HIPBLAS_DIAG_NON_UNIT; return true; case DiagType::kUnit: diag = HIPBLAS_DIAG_UNIT; return true; default: break; } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Gets the cublas algorithm given threadblock tile dimensions and math opcode class hipblasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) { return (opcode_class == library::OpcodeClassID::kSimt ? HIPBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular GEMM description Status cublas_satisfies(library::GemmDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.C.element == library::NumericTypeID::kS4 || desc.C.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasGemmExDispatcher::cublasGemmExDispatcher( library::GemmDescription const &op_desc, library::GemmUniversalConfiguration configuration_, library::GemmUniversalArguments arguments_, hipblasGemmAlgo_t algorithm ): configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) { bool good = true; good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_B, op_desc.B.element)); good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && op_desc.B.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case HIP_R_32F: case HIP_C_32F: compute_type = isPedantic ? 
CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case HIP_R_64F: case HIP_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case HIP_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case HIP_R_32I: compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes GEMM using these arguments hipblasStatus_t cublasGemmExDispatcher::operator()(hipblasHandle_t handle) { if (configuration.mode == library::GemmUniversalMode::kBatched) { return hipblasGemmStridedBatchedEx( handle, trans_A, trans_B, configuration.problem_size.m(), configuration.problem_size.n(), configuration.problem_size.k(), arguments.alpha, arguments.A, data_type_A, int(configuration.lda), arguments.batch_stride_A, arguments.B, data_type_B, int(configuration.ldb), arguments.batch_stride_B, arguments.beta, arguments.D, data_type_C, int(configuration.ldc), arguments.batch_stride_C, configuration.batch_count, #if (__CUDACC_VER_MAJOR__ >= 11) compute_type, #else compute_data_type, #endif algo ); } else { return hipblasGemmEx( handle, trans_A, trans_B, configuration.problem_size.m(), configuration.problem_size.n(), configuration.problem_size.k(), arguments.alpha, arguments.A, data_type_A, int(configuration.lda), arguments.B, data_type_B, int(configuration.ldb), arguments.beta, arguments.D, data_type_C, int(configuration.ldc), #if (__CUDACC_VER_MAJOR__ >= 11) compute_type, #else compute_data_type, #endif algo ); } } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular RankK description Status cublas_satisfies(library::RankKDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.C.element == library::NumericTypeID::kS4 || desc.C.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasRankKDispatcher::cublasRankKDispatcher( library::RankKDescription const &op_desc, library::RankKConfiguration configuration_, library::RankKArguments arguments_ ): configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { blas_mode = op_desc.blas_mode; num_ranks = op_desc.num_ranks; bool good = true; good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data 
types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case HIP_R_32F: case HIP_C_32F: compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case HIP_R_64F: case HIP_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case HIP_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case HIP_R_32I: compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes RankK using these arguments hipblasStatus_t cublasRankKDispatcher::operator()(hipblasHandle_t handle) { // SYRK and HERK if (num_ranks == 1) { if (data_type_A == data_type_C && data_type_A == HIP_R_64F) { return hipblasDsyrk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.beta), static_cast<double*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == HIP_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif return hipblasSsyrk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.beta), static_cast<float*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == HIP_C_64F) { if (blas_mode == BlasMode::kHermitian) { return hipblasZherk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const double*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.beta), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldc) ); } else { return hipblasZsyrk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const hipDoubleComplex*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const hipDoubleComplex*>(arguments.beta), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldc) ); } } else if (data_type_A == data_type_C && data_type_A == HIP_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif if (blas_mode == BlasMode::kHermitian) { return hipblasCherk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const float*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.beta), static_cast<hipComplex*>(arguments.D), int(configuration.ldc) ); } else { return hipblasCsyrk( handle, uplo, trans_A, 
configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const hipComplex*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const hipComplex*>(arguments.beta), static_cast<hipComplex*>(arguments.D), int(configuration.ldc) ); } } else { return HIPBLAS_STATUS_NOT_SUPPORTED; } } // SYR2K and HER2K else if (num_ranks == 2) { if (data_type_A == data_type_C && data_type_A == HIP_R_64F) { return hipblasDsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.B), int(configuration.ldb), static_cast<const double*>(arguments.beta), static_cast<double*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == HIP_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif return hipblasSsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.B), int(configuration.ldb), static_cast<const float*>(arguments.beta), static_cast<float*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == HIP_C_64F) { if (blas_mode == BlasMode::kHermitian) { return hipblasZher2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const hipDoubleComplex*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const hipDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const double*>(arguments.beta), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldc) ); } else { return hipblasZsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const hipDoubleComplex*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const hipDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const hipDoubleComplex*>(arguments.beta), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldc) ); } } else if (data_type_A == data_type_C && data_type_A == HIP_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif if (blas_mode == BlasMode::kHermitian) { return hipblasCher2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const hipComplex*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const hipComplex*>(arguments.B), int(configuration.ldb), static_cast<const float*>(arguments.beta), static_cast<hipComplex*>(arguments.D), int(configuration.ldc) ); } else { return hipblasCsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const hipComplex*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const hipComplex*>(arguments.B), int(configuration.ldb), static_cast<const hipComplex*>(arguments.beta), static_cast<hipComplex*>(arguments.D), int(configuration.ldc) ); } } else { 
return HIPBLAS_STATUS_NOT_SUPPORTED; } } else { return HIPBLAS_STATUS_NOT_SUPPORTED; } } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular TRMM description Status cublas_satisfies(library::TrmmDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.D.element == library::NumericTypeID::kS4 || desc.D.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasTrmmDispatcher::cublasTrmmDispatcher( library::TrmmDescription const &op_desc, library::TrmmConfiguration configuration_, library::TrmmArguments arguments_ ): configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { bool good = true; good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); good = (good && get_cublas_side_mode(side, op_desc.side_mode)); good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); good = (good && get_cublas_diag_type(diag, op_desc.diag_type)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_B, op_desc.B.element)); good = (good && get_cublas_datatype(data_type_D, op_desc.D.element)); // if A is Transposed, then for cuBLAS that is inverted Fill Mode. if (trans_A == HIPBLAS_OP_T || trans_A == HIPBLAS_OP_C) { if (uplo == HIPBLAS_FILL_MODE_LOWER) uplo = HIPBLAS_FILL_MODE_UPPER; else uplo = HIPBLAS_FILL_MODE_LOWER; } good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case HIP_R_32F: case HIP_C_32F: compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case HIP_R_64F: case HIP_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case HIP_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case HIP_R_32I: compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes TRMM using these arguments hipblasStatus_t cublasTrmmDispatcher::operator()(hipblasHandle_t handle) { if (data_type_A == data_type_D && data_type_A == HIP_R_64F) { return hipblasDtrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.B), int(configuration.ldb), static_cast<double*>(arguments.D), int(configuration.ldd) ); } else if (data_type_A == data_type_D && data_type_A == HIP_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif return hipblasStrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.B), int(configuration.ldb), static_cast<float*>(arguments.D), int(configuration.ldd) ); } else if (data_type_A == data_type_D && data_type_A == HIP_C_64F) { return hipblasZtrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const hipDoubleComplex*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const hipDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldd) ); } else if (data_type_A == data_type_D && data_type_A == HIP_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif return hipblasCtrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const hipComplex*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const hipComplex*>(arguments.B), int(configuration.ldb), static_cast<hipComplex*>(arguments.D), int(configuration.ldd) ); } else { return HIPBLAS_STATUS_NOT_SUPPORTED; } } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular Symm description Status cublas_satisfies(library::SymmDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.C.element == library::NumericTypeID::kS4 || desc.C.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.B.element == library::NumericTypeID::kBF16 || desc.B.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } // only column major layout is supported in 
cuBLAS if (desc.A.layout != library::LayoutTypeID::kColumnMajor || desc.transform_A != library::ComplexTransform::kNone) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasSymmDispatcher::cublasSymmDispatcher( library::SymmDescription const &op_desc, library::SymmConfiguration configuration_, library::SymmArguments arguments_ ): configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { blas_mode = op_desc.blas_mode; bool good = true; good = (good && get_cublas_side_mode(side, op_desc.side_mode)); good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case HIP_R_32F: case HIP_C_32F: compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case HIP_R_64F: case HIP_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case HIP_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case HIP_R_32I: compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes Symm using these arguments hipblasStatus_t cublasSymmDispatcher::operator()(hipblasHandle_t handle) { // SYMM and HEMM if (data_type_A == data_type_C && data_type_A == HIP_R_64F) { return hipblasDsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.B), int(configuration.ldb), static_cast<const double*>(arguments.beta), static_cast<double*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == HIP_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif return hipblasSsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.B), int(configuration.ldb), static_cast<const float*>(arguments.beta), static_cast<float*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == HIP_C_64F) { if (blas_mode == BlasMode::kHermitian) { return hipblasZhemm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const hipDoubleComplex*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const hipDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const hipDoubleComplex*>(arguments.beta), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldc) ); } else { return hipblasZsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const hipDoubleComplex*>(arguments.alpha), static_cast<const hipDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const hipDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const hipDoubleComplex*>(arguments.beta), static_cast<hipDoubleComplex*>(arguments.D), int(configuration.ldc) ); } } else if (data_type_A == data_type_C && data_type_A == HIP_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != HIPBLAS_STATUS_SUCCESS) return HIPBLAS_STATUS_NOT_SUPPORTED; #endif if (blas_mode == BlasMode::kHermitian) { return hipblasChemm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const hipComplex*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const hipComplex*>(arguments.B), int(configuration.ldb), static_cast<const hipComplex*>(arguments.beta), static_cast<hipComplex*>(arguments.D), int(configuration.ldc) ); } else { return hipblasCsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const hipComplex*>(arguments.alpha), static_cast<const hipComplex*>(arguments.A), int(configuration.lda), static_cast<const hipComplex*>(arguments.B), int(configuration.ldb), static_cast<const hipComplex*>(arguments.beta), static_cast<hipComplex*>(arguments.D), int(configuration.ldc) ); } } else { return HIPBLAS_STATUS_NOT_SUPPORTED; } } } // namespace detail 
/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace profiler
} // namespace cutlass

#endif // #if CUTLASS_ENABLE_CUBLAS
54ab1802418f245a390f51bf8be77e213b3d89b9.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Helper functions for mapping CUTLASS concepts to cuBLAS. 
*/ #include <stdexcept> #if CUTLASS_ENABLE_CUBLAS #include "cublas_helpers.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Converts a cuBLAS status to cutlass::Status Status get_cutlass_status(cublasStatus_t cublas) { switch (cublas) { case CUBLAS_STATUS_SUCCESS: return Status::kSuccess; case CUBLAS_STATUS_INVALID_VALUE: return Status::kErrorInvalidProblem; case CUBLAS_STATUS_NOT_SUPPORTED: return Status::kErrorNotSupported; default: break; } return Status::kErrorInternal; } /// Converts a cuBLAS status to cutlass::profiler::Disposition Disposition get_cutlass_disposition(cublasStatus_t cublas_status) { if (cublas_status == CUBLAS_STATUS_INVALID_VALUE) { return Disposition::kInvalidProblem; } else if (cublas_status == CUBLAS_STATUS_NOT_SUPPORTED) { return Disposition::kNotSupported; } return Disposition::kFailed; } /// Maps a CUTLASS tensor layout to a cuBLAS transpose operation bool get_cublas_transpose_operation( cublasOperation_t &operation, library::LayoutTypeID layout, library::ComplexTransform transform) { switch (layout) { case library::LayoutTypeID::kColumnMajor: if (transform == library::ComplexTransform::kNone) { operation = CUBLAS_OP_N; return true; } else { return false; } break; case library::LayoutTypeID::kRowMajor: if (transform == library::ComplexTransform::kNone) { operation = CUBLAS_OP_T; return true; } else if (transform == library::ComplexTransform::kConjugate) { operation = CUBLAS_OP_C; return true; } break; default: break; } return false; } /// Maps a CUTLASS numeric type to a cuBLAS data type enumeration bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type) { switch (element_type) { case library::NumericTypeID::kFE4M3: #if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) data_type = CUDA_R_8F_E4M3; return true; #endif break; case library::NumericTypeID::kFE5M2: #if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8)) data_type = CUDA_R_8F_E5M2; return true; #endif break; case library::NumericTypeID::kF16: data_type = CUDA_R_16F; return true; case library::NumericTypeID::kBF16: data_type = CUDA_R_16BF; return true; case library::NumericTypeID::kTF32: break; case library::NumericTypeID::kF32: data_type = CUDA_R_32F; return true; case library::NumericTypeID::kF64: data_type = CUDA_R_64F; return true; case library::NumericTypeID::kS4: break; case library::NumericTypeID::kS8: data_type = CUDA_R_8I; return true; case library::NumericTypeID::kS16: break; case library::NumericTypeID::kS32: data_type = CUDA_R_32I; return true; case library::NumericTypeID::kS64: break; case library::NumericTypeID::kU4: break; case library::NumericTypeID::kU8: data_type = CUDA_R_8U; return true; case library::NumericTypeID::kU16: break; case library::NumericTypeID::kU32: data_type = CUDA_R_32U; return true; case library::NumericTypeID::kU64: break; case library::NumericTypeID::kB1: break; case library::NumericTypeID::kCF32: data_type = CUDA_C_32F; return true; case library::NumericTypeID::kCF64: data_type = CUDA_C_64F; return true; case library::NumericTypeID::kInvalid: default: break; } return false; } /// Maps a cutlass::SideMode to cuBLAS side mode bool get_cublas_side_mode(cublasSideMode_t& side, SideMode side_mode) { switch (side_mode) { case SideMode::kLeft: side = CUBLAS_SIDE_LEFT; return true; case SideMode::kRight: side = CUBLAS_SIDE_RIGHT; return true; default: break; 
} return false; } /// Maps a cutlass::FillMode to cuBLAS fill mode bool get_cublas_fill_mode(cublasFillMode_t& uplo, FillMode fill_mode) { switch (fill_mode) { case FillMode::kLower: uplo = CUBLAS_FILL_MODE_LOWER; return true; case FillMode::kUpper: uplo = CUBLAS_FILL_MODE_UPPER; return true; default: break; } return false; } /// Maps a cutlass::DiagType to cuBLAS diag type bool get_cublas_diag_type(cublasDiagType_t& diag, DiagType diag_type) { switch (diag_type) { case DiagType::kNonUnit: diag = CUBLAS_DIAG_NON_UNIT; return true; case DiagType::kUnit: diag = CUBLAS_DIAG_UNIT; return true; default: break; } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Gets the cublas algorithm given threadblock tile dimensions and math opcode class cublasGemmAlgo_t get_cublas_gemm_algo(int cta_m, int cta_n, int cta_k, library::OpcodeClassID opcode_class) { return (opcode_class == library::OpcodeClassID::kSimt ? CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular GEMM description Status cublas_satisfies(library::GemmDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.C.element == library::NumericTypeID::kS4 || desc.C.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasGemmExDispatcher::cublasGemmExDispatcher( library::GemmDescription const &op_desc, library::GemmUniversalConfiguration configuration_, library::GemmUniversalArguments arguments_, cublasGemmAlgo_t algorithm ): configuration(configuration_), arguments(arguments_), algo(algorithm), status(Status::kSuccess) { bool good = true; good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); good = (good && get_cublas_transpose_operation(trans_B, op_desc.B.layout, op_desc.transform_B)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_B, op_desc.B.element)); good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && op_desc.B.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case CUDA_R_32F: case CUDA_C_32F: compute_type = isPedantic ? 
CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case CUDA_R_64F: case CUDA_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case CUDA_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case CUDA_R_32I: compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes GEMM using these arguments cublasStatus_t cublasGemmExDispatcher::operator()(cublasHandle_t handle) { if (configuration.mode == library::GemmUniversalMode::kBatched) { return cublasGemmStridedBatchedEx( handle, trans_A, trans_B, configuration.problem_size.m(), configuration.problem_size.n(), configuration.problem_size.k(), arguments.alpha, arguments.A, data_type_A, int(configuration.lda), arguments.batch_stride_A, arguments.B, data_type_B, int(configuration.ldb), arguments.batch_stride_B, arguments.beta, arguments.D, data_type_C, int(configuration.ldc), arguments.batch_stride_C, configuration.batch_count, #if (__CUDACC_VER_MAJOR__ >= 11) compute_type, #else compute_data_type, #endif algo ); } else { return cublasGemmEx( handle, trans_A, trans_B, configuration.problem_size.m(), configuration.problem_size.n(), configuration.problem_size.k(), arguments.alpha, arguments.A, data_type_A, int(configuration.lda), arguments.B, data_type_B, int(configuration.ldb), arguments.beta, arguments.D, data_type_C, int(configuration.ldc), #if (__CUDACC_VER_MAJOR__ >= 11) compute_type, #else compute_data_type, #endif algo ); } } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular RankK description Status cublas_satisfies(library::RankKDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.C.element == library::NumericTypeID::kS4 || desc.C.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasRankKDispatcher::cublasRankKDispatcher( library::RankKDescription const &op_desc, library::RankKConfiguration configuration_, library::RankKArguments arguments_ ): configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { blas_mode = op_desc.blas_mode; num_ranks = op_desc.num_ranks; bool good = true; good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data 
types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case CUDA_R_32F: case CUDA_C_32F: compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case CUDA_R_64F: case CUDA_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case CUDA_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case CUDA_R_32I: compute_type = isPedantic ? CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes RankK using these arguments cublasStatus_t cublasRankKDispatcher::operator()(cublasHandle_t handle) { // SYRK and HERK if (num_ranks == 1) { if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) { return cublasDsyrk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.beta), static_cast<double*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif return cublasSsyrk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.beta), static_cast<float*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) { if (blas_mode == BlasMode::kHermitian) { return cublasZherk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const double*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.beta), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldc) ); } else { return cublasZsyrk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const cuDoubleComplex*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const cuDoubleComplex*>(arguments.beta), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldc) ); } } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif if (blas_mode == BlasMode::kHermitian) { return cublasCherk( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const float*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.beta), static_cast<cuComplex*>(arguments.D), int(configuration.ldc) ); } else { return cublasCsyrk( handle, uplo, trans_A, 
configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const cuComplex*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const cuComplex*>(arguments.beta), static_cast<cuComplex*>(arguments.D), int(configuration.ldc) ); } } else { return CUBLAS_STATUS_NOT_SUPPORTED; } } // SYR2K and HER2K else if (num_ranks == 2) { if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) { return cublasDsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.B), int(configuration.ldb), static_cast<const double*>(arguments.beta), static_cast<double*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif return cublasSsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.B), int(configuration.ldb), static_cast<const float*>(arguments.beta), static_cast<float*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) { if (blas_mode == BlasMode::kHermitian) { return cublasZher2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const cuDoubleComplex*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const cuDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const double*>(arguments.beta), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldc) ); } else { return cublasZsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const cuDoubleComplex*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const cuDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const cuDoubleComplex*>(arguments.beta), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldc) ); } } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif if (blas_mode == BlasMode::kHermitian) { return cublasCher2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const cuComplex*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const cuComplex*>(arguments.B), int(configuration.ldb), static_cast<const float*>(arguments.beta), static_cast<cuComplex*>(arguments.D), int(configuration.ldc) ); } else { return cublasCsyr2k( handle, uplo, trans_A, configuration.problem_size.n(), configuration.problem_size.k(), static_cast<const cuComplex*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const cuComplex*>(arguments.B), int(configuration.ldb), static_cast<const cuComplex*>(arguments.beta), static_cast<cuComplex*>(arguments.D), int(configuration.ldc) ); } } else { return 
CUBLAS_STATUS_NOT_SUPPORTED; } } else { return CUBLAS_STATUS_NOT_SUPPORTED; } } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular TRMM description Status cublas_satisfies(library::TrmmDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.D.element == library::NumericTypeID::kS4 || desc.D.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasTrmmDispatcher::cublasTrmmDispatcher( library::TrmmDescription const &op_desc, library::TrmmConfiguration configuration_, library::TrmmArguments arguments_ ): configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { bool good = true; good = (good && get_cublas_transpose_operation(trans_A, op_desc.A.layout, op_desc.transform_A)); good = (good && get_cublas_side_mode(side, op_desc.side_mode)); good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); good = (good && get_cublas_diag_type(diag, op_desc.diag_type)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_B, op_desc.B.element)); good = (good && get_cublas_datatype(data_type_D, op_desc.D.element)); // if A is Transposed, then for cuBLAS that is inverted Fill Mode. if (trans_A == CUBLAS_OP_T || trans_A == CUBLAS_OP_C) { if (uplo == CUBLAS_FILL_MODE_LOWER) uplo = CUBLAS_FILL_MODE_UPPER; else uplo = CUBLAS_FILL_MODE_LOWER; } good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case CUDA_R_32F: case CUDA_C_32F: compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case CUDA_R_64F: case CUDA_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case CUDA_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case CUDA_R_32I: compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes TRMM using these arguments cublasStatus_t cublasTrmmDispatcher::operator()(cublasHandle_t handle) { if (data_type_A == data_type_D && data_type_A == CUDA_R_64F) { return cublasDtrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.B), int(configuration.ldb), static_cast<double*>(arguments.D), int(configuration.ldd) ); } else if (data_type_A == data_type_D && data_type_A == CUDA_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif return cublasStrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.B), int(configuration.ldb), static_cast<float*>(arguments.D), int(configuration.ldd) ); } else if (data_type_A == data_type_D && data_type_A == CUDA_C_64F) { return cublasZtrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const cuDoubleComplex*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const cuDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldd) ); } else if (data_type_A == data_type_D && data_type_A == CUDA_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif return cublasCtrmm( handle, side, uplo, trans_A, diag, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const cuComplex*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const cuComplex*>(arguments.B), int(configuration.ldb), static_cast<cuComplex*>(arguments.D), int(configuration.ldd) ); } else { return CUBLAS_STATUS_NOT_SUPPORTED; } } } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns a status if cuBLAS can satisfy a particular Symm description Status cublas_satisfies(library::SymmDescription const &desc) { auto const &math_instruction = desc.tile_description.math_instruction; if (math_instruction.element_accumulator == library::NumericTypeID::kS32 && math_instruction.opcode_class == library::OpcodeClassID::kTensorOp) { return Status::kErrorNotSupported; } // output type S4 and S8 not supported in cuBLAS if (desc.C.element == library::NumericTypeID::kS4 || desc.C.element == library::NumericTypeID::kS8) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.A.element == library::NumericTypeID::kBF16 || desc.A.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } // input type BF16 and TF32 not supported in cuBLAS if (desc.B.element == library::NumericTypeID::kBF16 || desc.B.element == library::NumericTypeID::kTF32) { return Status::kErrorNotSupported; } // only column major layout is supported in cuBLAS if 
(desc.A.layout != library::LayoutTypeID::kColumnMajor || desc.transform_A != library::ComplexTransform::kNone) { return Status::kErrorNotSupported; } return Status::kSuccess; } ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { cublasSymmDispatcher::cublasSymmDispatcher( library::SymmDescription const &op_desc, library::SymmConfiguration configuration_, library::SymmArguments arguments_ ): configuration(configuration_), arguments(arguments_), status(Status::kSuccess) { blas_mode = op_desc.blas_mode; bool good = true; good = (good && get_cublas_side_mode(side, op_desc.side_mode)); good = (good && get_cublas_fill_mode(uplo, op_desc.fill_mode)); good = (good && get_cublas_datatype(data_type_A, op_desc.A.element)); good = (good && get_cublas_datatype(data_type_C, op_desc.C.element)); good = (good && get_cublas_datatype( compute_data_type, op_desc.tile_description.math_instruction.element_accumulator)); // cuBLAS introduces a separate cublasComputeType enumerant to more precisely describe // internal numerical data types used in the computation. #if (__CUDACC_VER_MAJOR__ >= 11) library::OpcodeClassID const & opcode_class = op_desc.tile_description.math_instruction.opcode_class; if (good && op_desc.A.element == library::NumericTypeID::kF32 && opcode_class == library::OpcodeClassID::kTensorOp) { compute_type = CUBLAS_COMPUTE_32F_FAST_TF32; } else if (good) { bool const isPedantic = false; switch (compute_data_type) { case CUDA_R_32F: case CUDA_C_32F: compute_type = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; break; case CUDA_R_64F: case CUDA_C_64F: compute_type = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; break; case CUDA_R_16F: compute_type = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; break; case CUDA_R_32I: compute_type = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; break; default: good = false; break; } } #endif // __CUDACC_VER_MAJOR__ >= 11 if (!good) { status = Status::kErrorNotSupported; } } /// Executes Symm using these arguments cublasStatus_t cublasSymmDispatcher::operator()(cublasHandle_t handle) { // SYMM and HEMM if (data_type_A == data_type_C && data_type_A == CUDA_R_64F) { return cublasDsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const double*>(arguments.alpha), static_cast<const double*>(arguments.A), int(configuration.lda), static_cast<const double*>(arguments.B), int(configuration.ldb), static_cast<const double*>(arguments.beta), static_cast<double*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == CUDA_R_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif return cublasSsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const float*>(arguments.alpha), static_cast<const float*>(arguments.A), int(configuration.lda), static_cast<const float*>(arguments.B), int(configuration.ldb), static_cast<const float*>(arguments.beta), static_cast<float*>(arguments.D), int(configuration.ldc) ); } else if (data_type_A == data_type_C && data_type_A == CUDA_C_64F) { if (blas_mode == BlasMode::kHermitian) { return cublasZhemm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const cuDoubleComplex*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const cuDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const cuDoubleComplex*>(arguments.beta), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldc) ); } else { return cublasZsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const cuDoubleComplex*>(arguments.alpha), static_cast<const cuDoubleComplex*>(arguments.A), int(configuration.lda), static_cast<const cuDoubleComplex*>(arguments.B), int(configuration.ldb), static_cast<const cuDoubleComplex*>(arguments.beta), static_cast<cuDoubleComplex*>(arguments.D), int(configuration.ldc) ); } } else if (data_type_A == data_type_C && data_type_A == CUDA_C_32F) { #if (__CUDACC_VER_MAJOR__ >= 11) if (cublasSetMathMode(handle, CUBLAS_TF32_TENSOR_OP_MATH) != CUBLAS_STATUS_SUCCESS) return CUBLAS_STATUS_NOT_SUPPORTED; #endif if (blas_mode == BlasMode::kHermitian) { return cublasChemm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const cuComplex*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const cuComplex*>(arguments.B), int(configuration.ldb), static_cast<const cuComplex*>(arguments.beta), static_cast<cuComplex*>(arguments.D), int(configuration.ldc) ); } else { return cublasCsymm( handle, side, uplo, configuration.problem_size.m(), configuration.problem_size.n(), static_cast<const cuComplex*>(arguments.alpha), static_cast<const cuComplex*>(arguments.A), int(configuration.lda), static_cast<const cuComplex*>(arguments.B), int(configuration.ldb), static_cast<const cuComplex*>(arguments.beta), static_cast<cuComplex*>(arguments.D), int(configuration.ldc) ); } } else { return CUBLAS_STATUS_NOT_SUPPORTED; } } } // namespace detail 
/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace profiler
} // namespace cutlass

#endif // #if CUTLASS_ENABLE_CUBLAS
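The dispatchers in cublas_helpers.cu ultimately forward a CUTLASS problem description into a single cuBLAS call; for GEMM that call is cublasGemmEx (or cublasGemmStridedBatchedEx in the batched branch). The following standalone sketch exercises the non-batched FP32 path with the same argument order and enumerants that cublasGemmExDispatcher::operator() uses above. The matrix sizes, the uninitialized device buffers, and main() itself are illustrative assumptions, not part of either file; build with nvcc and link against -lcublas (CUDA 11+ for cublasComputeType_t).

// Minimal sketch of the non-batched FP32 cublasGemmEx path. Sizes and setup are
// assumptions for illustration; the call shape mirrors the dispatcher above.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int m = 128, n = 128, k = 128;
    float alpha = 1.0f, beta = 0.0f;

    // Device operands; contents are left uninitialized because only the call
    // shape matters for this sketch.
    float *A, *B, *D;
    cudaMalloc(&A, sizeof(float) * m * k);
    cudaMalloc(&B, sizeof(float) * k * n);
    cudaMalloc(&D, sizeof(float) * m * n);

    cublasHandle_t handle;
    cublasCreate(&handle);

    // Column-major operands, no transposition: the operation the dispatcher
    // would select for kColumnMajor layouts with ComplexTransform::kNone.
    cublasStatus_t status = cublasGemmEx(
        handle, CUBLAS_OP_N, CUBLAS_OP_N,
        m, n, k,
        &alpha,
        A, CUDA_R_32F, m,
        B, CUDA_R_32F, k,
        &beta,
        D, CUDA_R_32F, m,
        CUBLAS_COMPUTE_32F,    // compute_type selected above for CUDA_R_32F
        CUBLAS_GEMM_DEFAULT);  // the kSimt choice in get_cublas_gemm_algo
    printf("cublasGemmEx returned %d\n", int(status));

    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(D);
    return 0;
}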
8f55ec935a0085f6ddb91ba1e566ef7575b62107.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/warp_perspective/backward_mat.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #include "src/cuda/warp_perspective/common.h" #include "src/cuda/utils.cuh" #include "src/cuda/warp_perspective/common.cuh" #include <cstdio> #include "src/cuda/cub/util_ptx.cuh" namespace megdnn { namespace cuda { namespace warp_perspective { template <typename Getter> __global__ void warp_perspective_bwd_mat_kernel( const float* hidden, const float* in, const float* mat, const int* midx, float* grad, int N, int C, int IH, int IW, int OH, int OW) { Getter getter; int n = blockIdx.z; int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; hidden += blockIdx.z * C*OH*OW; if (midx) { in += midx[n] * C * IH * IW; } else { in += n * C * IH * IW; } mat += n * 3*3; grad += n * 3*3; float grad_local[3*3]; memset(grad_local, 0, sizeof(grad_local)); if (ow < OW && oh < OH) { float numeratorw = mat[0]*ow + mat[1]*oh + mat[2]; float numeratorh = mat[3]*ow + mat[4]*oh + mat[5]; float denominator = mat[6]*ow + mat[7]*oh + mat[8]; float denominator2 = sqr(denominator); float iw = numeratorw / denominator; float ih = numeratorh / denominator; int iw0 = getter(floor(iw) + 0, IW); int iw1 = getter(floor(iw) + 1, IW); int ih0 = getter(floor(ih) + 0, IH); int ih1 = getter(floor(ih) + 1, IH); float palpha = ih - floor(ih); float pbeta = iw - floor(iw); float nalpha = 1.0f - palpha; float nbeta = 1.0f - pbeta; for (int c = 0; c < C; ++c) { float dalpha = 0, dbeta = 0; dalpha -= in[ih0*IW+iw0] * nbeta; dalpha -= in[ih0*IW+iw1] * pbeta; dalpha += in[ih1*IW+iw0] * nbeta; dalpha += in[ih1*IW+iw1] * pbeta; dbeta -= in[ih0*IW+iw0] * nalpha; dbeta += in[ih0*IW+iw1] * nalpha; dbeta -= in[ih1*IW+iw0] * palpha; dbeta += in[ih1*IW+iw1] * palpha; float dw[9], dh[9]; // dw[i] = d(iw)/d(mat[i]) dw[0] = ow / denominator; dw[1] = oh / denominator; dw[2] = 1.0f / denominator; dw[3] = 0.0f; dw[4] = 0.0f; dw[5] = 0.0f; float ddenominatorw = -numeratorw / denominator2; dw[6] = ow * ddenominatorw; dw[7] = oh * ddenominatorw; dw[8] = 1.0f * ddenominatorw; // dh[i] = d(ih)/d(mat[i]) dh[0] = 0.0f; dh[1] = 0.0f; dh[2] = 0.0f; dh[3] = ow / denominator; dh[4] = oh / denominator; dh[5] = 1.0f / denominator; float ddenominatorh = -numeratorh / denominator2; dh[6] = ow * ddenominatorh; dh[7] = oh * ddenominatorh; dh[8] = 1.0f * ddenominatorh; #pragma unroll for (int i = 0; i < 9; ++i) { grad_local[i] += hidden[oh * OW + ow] * dalpha * dh[i] + hidden[oh * OW + ow] * dbeta * dw[i]; } hidden += OH*OW; in += IH*IW; } } volatile __shared__ float grad_shared[16][32][3*3]; int tidy = threadIdx.y, tidx = threadIdx.x; #pragma unroll for (int i = 0; i < 9; ++i) grad_shared[tidy][tidx][i] = grad_local[i]; __syncthreads(); for (int k = 8; k >= 1; k >>= 1) { if (tidy < k) { #pragma unroll for (int i = 0; i < 9; ++i) { grad_shared[tidy][tidx][i] += grad_shared[tidy+k][tidx][i]; } } __syncthreads(); } if (tidy == 0 && tidx < 16) { for (int k = 16; k >= 1; k >>= 1) { if (tidx < k) { #pragma unroll for (int i = 0; i < 9; ++i) { grad_shared[tidy][tidx][i] += grad_shared[tidy][tidx + k][i]; } } cub::WARP_SYNC(0xffffffff); } 
} if (tidy == 0 && tidx == 0) { #pragma unroll for (int i = 0; i < 9; ++i) atomicAdd(grad+i, grad_shared[0][0][i]); } } __global__ void warp_perspective_bwd_mat_constant_kernel( const float* hidden, const float* in, const float* mat, const int* midx, float* grad, int N, int C, int IH, int IW, int OH, int OW, float bval) { int n = blockIdx.z; int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; hidden += blockIdx.z * C * OH * OW; if (midx) { in += midx[n] * C * IH * IW; } else { in += n * C * IH * IW; } mat += n * 3 * 3; grad += n * 3 * 3; float grad_local[3 * 3]; memset(grad_local, 0, sizeof(grad_local)); if (ow < OW && oh < OH) { float numeratorw = mat[0]*ow + mat[1]*oh + mat[2]; float numeratorh = mat[3]*ow + mat[4]*oh + mat[5]; float denominator = mat[6]*ow + mat[7]*oh + mat[8]; float denominator2 = sqr(denominator); float iw = numeratorw / denominator; float ih = numeratorh / denominator; int iw0 = floor(iw) + 0; int iw1 = floor(iw) + 1; int ih0 = floor(ih) + 0; int ih1 = floor(ih) + 1; bool okw0 = (iw0 >= 0 && iw0 < IW); bool okw1 = (iw1 >= 0 && iw1 < IW); bool okh0 = (ih0 >= 0 && ih0 < IH); bool okh1 = (ih1 >= 0 && ih1 < IH); iw0 = min(max(iw0, 0), IW-1); iw1 = min(max(iw1, 0), IW-1); ih0 = min(max(ih0, 0), IH-1); ih1 = min(max(ih1, 0), IH-1); float palpha = ih - floor(ih); float pbeta = iw - floor(iw); float nalpha = 1.0f - palpha; float nbeta = 1.0f - pbeta; for (int c = 0; c < C; ++c) { float v00 = (okh0 && okw0 ? in[ih0*IW+iw0] : bval); float v01 = (okh0 && okw1 ? in[ih0*IW+iw1] : bval); float v10 = (okh1 && okw0 ? in[ih1*IW+iw0] : bval); float v11 = (okh1 && okw1 ? in[ih1*IW+iw1] : bval); float dalpha = 0, dbeta = 0; dalpha -= v00 * nbeta; dalpha -= v01 * pbeta; dalpha += v10 * nbeta; dalpha += v11 * pbeta; dbeta -= v00 * nalpha; dbeta += v01 * nalpha; dbeta -= v10 * palpha; dbeta += v11 * palpha; float dw[9], dh[9]; // dw[i] = d(iw)/d(mat[i]) dw[0] = ow / denominator; dw[1] = oh / denominator; dw[2] = 1.0f / denominator; dw[3] = 0.0f; dw[4] = 0.0f; dw[5] = 0.0f; float ddenominatorw = -numeratorw / denominator2; dw[6] = ow * ddenominatorw; dw[7] = oh * ddenominatorw; dw[8] = 1.0f * ddenominatorw; // dh[i] = d(ih)/d(mat[i]) dh[0] = 0.0f; dh[1] = 0.0f; dh[2] = 0.0f; dh[3] = ow / denominator; dh[4] = oh / denominator; dh[5] = 1.0f / denominator; float ddenominatorh = -numeratorh / denominator2; dh[6] = ow * ddenominatorh; dh[7] = oh * ddenominatorh; dh[8] = 1.0f * ddenominatorh; #pragma unroll for (int i = 0; i < 9; ++i) { float delta = hidden[oh * OW + ow] * dalpha * dh[i] + hidden[oh * OW + ow] * dbeta * dw[i]; if (isfinite(delta)) grad_local[i] += delta; } hidden += OH*OW; in += IH*IW; } } volatile __shared__ float grad_shared[16][32][3*3]; int tidy = threadIdx.y, tidx = threadIdx.x; #pragma unroll for (int i = 0; i < 9; ++i) grad_shared[tidy][tidx][i] = grad_local[i]; __syncthreads(); for (int k = 8; k >= 1; k >>= 1) { if (tidy < k) { #pragma unroll for (int i = 0; i < 9; ++i) { grad_shared[tidy][tidx][i] += grad_shared[tidy+k][tidx][i]; } } __syncthreads(); } if (tidy == 0 && tidx < 16) { for (int k = 16; k >= 1; k >>= 1) { if (tidx < k) { #pragma unroll for (int i = 0; i < 9; ++i) grad_shared[tidy][tidx][i] += grad_shared[tidy][tidx + k][i]; } cub::WARP_SYNC(0xffffffff); } } if (tidy == 0 && tidx == 0) { #pragma unroll for (int i = 0; i < 9; ++i) atomicAdd(grad+i, grad_shared[0][0][i]); } } void backward_mat_proxy(const float* src, const float* mat, const int* midx, const float* diff, float* grad, int N, int C, int IH, int IW, int 
OH, int OW, float bval, BorderMode mode, hipStream_t stream) { const int BY = 16, BX = 32; dim3 threads(BX, BY); dim3 blocks((OW+BX-1)/BX, (OH+BY-1)/BY, N); cuda_check(hipMemsetAsync(grad, 0, sizeof(float) * N*3*3, stream)); #define DISPATCH(Getter) \ hipLaunchKernelGGL(( warp_perspective_bwd_mat_kernel<Getter>), dim3(blocks), dim3(threads), 0, stream, \ diff, src, mat, midx, grad, N, C, IH, IW, OH, OW); switch (mode) { case BORDER_REPLICATE: DISPATCH(ReplicateGetter); break; case BORDER_REFLECT: DISPATCH(ReflectGetter); break; case BORDER_REFLECT_101: DISPATCH(Reflect101Getter); break; case BORDER_WRAP: DISPATCH(WrapGetter); break; case BORDER_CONSTANT: hipLaunchKernelGGL(( warp_perspective_bwd_mat_constant_kernel), dim3(blocks), dim3(threads), 0, stream, diff, src, mat, midx, grad, N, C, IH, IW, OH, OW, bval); break; default: break; } #undef DISPATCH after_kernel_launch(); } } // namespace warp_perspective } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
8f55ec935a0085f6ddb91ba1e566ef7575b62107.cu
/** * \file dnn/src/cuda/warp_perspective/backward_mat.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #include "src/cuda/warp_perspective/common.h" #include "src/cuda/utils.cuh" #include "src/cuda/warp_perspective/common.cuh" #include <cstdio> #include "src/cuda/cub/util_ptx.cuh" namespace megdnn { namespace cuda { namespace warp_perspective { template <typename Getter> __global__ void warp_perspective_bwd_mat_kernel( const float* hidden, const float* in, const float* mat, const int* midx, float* grad, int N, int C, int IH, int IW, int OH, int OW) { Getter getter; int n = blockIdx.z; int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; hidden += blockIdx.z * C*OH*OW; if (midx) { in += midx[n] * C * IH * IW; } else { in += n * C * IH * IW; } mat += n * 3*3; grad += n * 3*3; float grad_local[3*3]; memset(grad_local, 0, sizeof(grad_local)); if (ow < OW && oh < OH) { float numeratorw = mat[0]*ow + mat[1]*oh + mat[2]; float numeratorh = mat[3]*ow + mat[4]*oh + mat[5]; float denominator = mat[6]*ow + mat[7]*oh + mat[8]; float denominator2 = sqr(denominator); float iw = numeratorw / denominator; float ih = numeratorh / denominator; int iw0 = getter(floor(iw) + 0, IW); int iw1 = getter(floor(iw) + 1, IW); int ih0 = getter(floor(ih) + 0, IH); int ih1 = getter(floor(ih) + 1, IH); float palpha = ih - floor(ih); float pbeta = iw - floor(iw); float nalpha = 1.0f - palpha; float nbeta = 1.0f - pbeta; for (int c = 0; c < C; ++c) { float dalpha = 0, dbeta = 0; dalpha -= in[ih0*IW+iw0] * nbeta; dalpha -= in[ih0*IW+iw1] * pbeta; dalpha += in[ih1*IW+iw0] * nbeta; dalpha += in[ih1*IW+iw1] * pbeta; dbeta -= in[ih0*IW+iw0] * nalpha; dbeta += in[ih0*IW+iw1] * nalpha; dbeta -= in[ih1*IW+iw0] * palpha; dbeta += in[ih1*IW+iw1] * palpha; float dw[9], dh[9]; // dw[i] = d(iw)/d(mat[i]) dw[0] = ow / denominator; dw[1] = oh / denominator; dw[2] = 1.0f / denominator; dw[3] = 0.0f; dw[4] = 0.0f; dw[5] = 0.0f; float ddenominatorw = -numeratorw / denominator2; dw[6] = ow * ddenominatorw; dw[7] = oh * ddenominatorw; dw[8] = 1.0f * ddenominatorw; // dh[i] = d(ih)/d(mat[i]) dh[0] = 0.0f; dh[1] = 0.0f; dh[2] = 0.0f; dh[3] = ow / denominator; dh[4] = oh / denominator; dh[5] = 1.0f / denominator; float ddenominatorh = -numeratorh / denominator2; dh[6] = ow * ddenominatorh; dh[7] = oh * ddenominatorh; dh[8] = 1.0f * ddenominatorh; #pragma unroll for (int i = 0; i < 9; ++i) { grad_local[i] += hidden[oh * OW + ow] * dalpha * dh[i] + hidden[oh * OW + ow] * dbeta * dw[i]; } hidden += OH*OW; in += IH*IW; } } volatile __shared__ float grad_shared[16][32][3*3]; int tidy = threadIdx.y, tidx = threadIdx.x; #pragma unroll for (int i = 0; i < 9; ++i) grad_shared[tidy][tidx][i] = grad_local[i]; __syncthreads(); for (int k = 8; k >= 1; k >>= 1) { if (tidy < k) { #pragma unroll for (int i = 0; i < 9; ++i) { grad_shared[tidy][tidx][i] += grad_shared[tidy+k][tidx][i]; } } __syncthreads(); } if (tidy == 0 && tidx < 16) { for (int k = 16; k >= 1; k >>= 1) { if (tidx < k) { #pragma unroll for (int i = 0; i < 9; ++i) { grad_shared[tidy][tidx][i] += grad_shared[tidy][tidx + k][i]; } } cub::WARP_SYNC(0xffffffff); } } if (tidy == 0 && tidx == 0) { #pragma unroll for (int i = 0; i < 9; ++i) 
atomicAdd(grad+i, grad_shared[0][0][i]); } } __global__ void warp_perspective_bwd_mat_constant_kernel( const float* hidden, const float* in, const float* mat, const int* midx, float* grad, int N, int C, int IH, int IW, int OH, int OW, float bval) { int n = blockIdx.z; int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; hidden += blockIdx.z * C * OH * OW; if (midx) { in += midx[n] * C * IH * IW; } else { in += n * C * IH * IW; } mat += n * 3 * 3; grad += n * 3 * 3; float grad_local[3 * 3]; memset(grad_local, 0, sizeof(grad_local)); if (ow < OW && oh < OH) { float numeratorw = mat[0]*ow + mat[1]*oh + mat[2]; float numeratorh = mat[3]*ow + mat[4]*oh + mat[5]; float denominator = mat[6]*ow + mat[7]*oh + mat[8]; float denominator2 = sqr(denominator); float iw = numeratorw / denominator; float ih = numeratorh / denominator; int iw0 = floor(iw) + 0; int iw1 = floor(iw) + 1; int ih0 = floor(ih) + 0; int ih1 = floor(ih) + 1; bool okw0 = (iw0 >= 0 && iw0 < IW); bool okw1 = (iw1 >= 0 && iw1 < IW); bool okh0 = (ih0 >= 0 && ih0 < IH); bool okh1 = (ih1 >= 0 && ih1 < IH); iw0 = min(max(iw0, 0), IW-1); iw1 = min(max(iw1, 0), IW-1); ih0 = min(max(ih0, 0), IH-1); ih1 = min(max(ih1, 0), IH-1); float palpha = ih - floor(ih); float pbeta = iw - floor(iw); float nalpha = 1.0f - palpha; float nbeta = 1.0f - pbeta; for (int c = 0; c < C; ++c) { float v00 = (okh0 && okw0 ? in[ih0*IW+iw0] : bval); float v01 = (okh0 && okw1 ? in[ih0*IW+iw1] : bval); float v10 = (okh1 && okw0 ? in[ih1*IW+iw0] : bval); float v11 = (okh1 && okw1 ? in[ih1*IW+iw1] : bval); float dalpha = 0, dbeta = 0; dalpha -= v00 * nbeta; dalpha -= v01 * pbeta; dalpha += v10 * nbeta; dalpha += v11 * pbeta; dbeta -= v00 * nalpha; dbeta += v01 * nalpha; dbeta -= v10 * palpha; dbeta += v11 * palpha; float dw[9], dh[9]; // dw[i] = d(iw)/d(mat[i]) dw[0] = ow / denominator; dw[1] = oh / denominator; dw[2] = 1.0f / denominator; dw[3] = 0.0f; dw[4] = 0.0f; dw[5] = 0.0f; float ddenominatorw = -numeratorw / denominator2; dw[6] = ow * ddenominatorw; dw[7] = oh * ddenominatorw; dw[8] = 1.0f * ddenominatorw; // dh[i] = d(ih)/d(mat[i]) dh[0] = 0.0f; dh[1] = 0.0f; dh[2] = 0.0f; dh[3] = ow / denominator; dh[4] = oh / denominator; dh[5] = 1.0f / denominator; float ddenominatorh = -numeratorh / denominator2; dh[6] = ow * ddenominatorh; dh[7] = oh * ddenominatorh; dh[8] = 1.0f * ddenominatorh; #pragma unroll for (int i = 0; i < 9; ++i) { float delta = hidden[oh * OW + ow] * dalpha * dh[i] + hidden[oh * OW + ow] * dbeta * dw[i]; if (isfinite(delta)) grad_local[i] += delta; } hidden += OH*OW; in += IH*IW; } } volatile __shared__ float grad_shared[16][32][3*3]; int tidy = threadIdx.y, tidx = threadIdx.x; #pragma unroll for (int i = 0; i < 9; ++i) grad_shared[tidy][tidx][i] = grad_local[i]; __syncthreads(); for (int k = 8; k >= 1; k >>= 1) { if (tidy < k) { #pragma unroll for (int i = 0; i < 9; ++i) { grad_shared[tidy][tidx][i] += grad_shared[tidy+k][tidx][i]; } } __syncthreads(); } if (tidy == 0 && tidx < 16) { for (int k = 16; k >= 1; k >>= 1) { if (tidx < k) { #pragma unroll for (int i = 0; i < 9; ++i) grad_shared[tidy][tidx][i] += grad_shared[tidy][tidx + k][i]; } cub::WARP_SYNC(0xffffffff); } } if (tidy == 0 && tidx == 0) { #pragma unroll for (int i = 0; i < 9; ++i) atomicAdd(grad+i, grad_shared[0][0][i]); } } void backward_mat_proxy(const float* src, const float* mat, const int* midx, const float* diff, float* grad, int N, int C, int IH, int IW, int OH, int OW, float bval, BorderMode mode, cudaStream_t stream) { const int BY 
= 16, BX = 32; dim3 threads(BX, BY); dim3 blocks((OW+BX-1)/BX, (OH+BY-1)/BY, N); cuda_check(cudaMemsetAsync(grad, 0, sizeof(float) * N*3*3, stream)); #define DISPATCH(Getter) \ warp_perspective_bwd_mat_kernel<Getter><<<blocks, threads, 0, stream>>>( \ diff, src, mat, midx, grad, N, C, IH, IW, OH, OW); switch (mode) { case BORDER_REPLICATE: DISPATCH(ReplicateGetter); break; case BORDER_REFLECT: DISPATCH(ReflectGetter); break; case BORDER_REFLECT_101: DISPATCH(Reflect101Getter); break; case BORDER_WRAP: DISPATCH(WrapGetter); break; case BORDER_CONSTANT: warp_perspective_bwd_mat_constant_kernel<<<blocks, threads, 0, stream>>>( diff, src, mat, midx, grad, N, C, IH, IW, OH, OW, bval); break; default: break; } #undef DISPATCH after_kernel_launch(); } } // namespace warp_perspective } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
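Both backward_mat kernels above finish with the same two-stage block reduction: every thread of a 32x16 block accumulates a local float[9] gradient, a shared-memory tree reduction collapses the 16 rows over threadIdx.y, a warp-synchronous pass collapses the surviving row over threadIdx.x, and one thread commits the nine sums with atomicAdd (the proxy zeroes the output with cudaMemsetAsync first). The kernel below is a self-contained sketch of just that pattern for a generic per-thread float[9] payload; the buffer layout and launch shape are assumptions, and __syncwarp(0xffffffff) stands in for the cub::WARP_SYNC(0xffffffff) used in the files above, with the whole first warp kept in the sync for simplicity.

// Sketch of the 32x16-block float[9] reduction used by the backward_mat kernels.
// Launch as: reduce_block_9<<<num_blocks, dim3(32, 16)>>>(per_thread, out);
// out must be zeroed beforehand, as backward_mat_proxy does for grad.
#include <cuda_runtime.h>

__global__ void reduce_block_9(const float* per_thread, float* out) {
    // per_thread holds one float[9] per thread: [gridDim.x][16][32][9].
    const float* p = per_thread + blockIdx.x * (16 * 32 * 9);

    volatile __shared__ float buf[16][32][9];
    int ty = threadIdx.y, tx = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < 9; ++i)
        buf[ty][tx][i] = p[(ty * 32 + tx) * 9 + i];
    __syncthreads();

    // Stage 1: tree reduction over the 16 rows (threadIdx.y).
    for (int k = 8; k >= 1; k >>= 1) {
        if (ty < k) {
            #pragma unroll
            for (int i = 0; i < 9; ++i)
                buf[ty][tx][i] += buf[ty + k][tx][i];
        }
        __syncthreads();
    }

    // Stage 2: row 0 holds 32 partial sums; reduce them inside warp 0.
    if (ty == 0) {
        for (int k = 16; k >= 1; k >>= 1) {
            if (tx < k) {
                #pragma unroll
                for (int i = 0; i < 9; ++i)
                    buf[0][tx][i] += buf[0][tx + k][i];
            }
            __syncwarp(0xffffffff);  // cub::WARP_SYNC(0xffffffff) in the originals
        }
    }

    // One thread per block accumulates the nine results globally.
    if (ty == 0 && tx == 0) {
        #pragma unroll
        for (int i = 0; i < 9; ++i)
            atomicAdd(out + i, buf[0][0][i]);
    }
}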
7478a9f56da0006fe7de3ba60d51d36962c19f47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define N 100 __global__ void setupRandStates(hiprandState_t* state, unsigned int seed) { unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x; int thread_id = threadIdx.x + block_id * blockDim.x; // Each thread gets same seed, a different sequence number, no offset hiprand_init(seed, thread_id, 0, &state[thread_id]); }
7478a9f56da0006fe7de3ba60d51d36962c19f47.cu
#include "includes.h" #define N 100 __global__ void setupRandStates(curandState_t* state, unsigned int seed) { unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x; int thread_id = threadIdx.x + block_id * blockDim.x; // Each thread gets same seed, a different sequence number, no offset curand_init(seed, thread_id, 0, &state[thread_id]); }
0c055d2193236082d4745bf09a6b6ad64a563e1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2020 by Contributors * \file array/cuda/spmat_op_impl_csr.cu * \brief CSR operator CPU implementation */ #include <dgl/array.h> #include <vector> #include <unordered_set> #include <numeric> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { /*! * \brief Search adjacency list linearly for each (row, col) pair and * write the data under the matched position in the indices array to the output. * * If there is no match, -1 is written. * If there are multiple matches, only the first match is written. * If the given data array is null, write the matched position to the output. */ template <typename IdType> __global__ void _LinearSearchKernel( const IdType* indptr, const IdType* indices, const IdType* data, const IdType* row, const IdType* col, int64_t row_stride, int64_t col_stride, int64_t length, IdType* out) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride, cpos = tx * col_stride; IdType v = -1; const IdType r = row[rpos], c = col[cpos]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { if (indices[i] == c) { v = (data)? data[i] : i; break; } } out[tx] = v; tx += stride_x; } } ///////////////////////////// CSRIsNonZero ///////////////////////////// template <DLDeviceType XPU, typename IdType> bool CSRIsNonZero(CSRMatrix csr, int64_t row, int64_t col) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = csr.indptr->ctx; IdArray rows = aten::VecToIdArray<int64_t>({row}, sizeof(IdType) * 8, ctx); IdArray cols = aten::VecToIdArray<int64_t>({col}, sizeof(IdType) * 8, ctx); rows = rows.CopyTo(ctx); cols = cols.CopyTo(ctx); IdArray out = aten::NewIdArray(1, ctx, sizeof(IdType) * 8); const IdType* data = nullptr; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(_LinearSearchKernel, 1, 1, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), data, rows.Ptr<IdType>(), cols.Ptr<IdType>(), 1, 1, 1, out.Ptr<IdType>()); out = out.CopyTo(DLContext{kDLCPU, 0}); return *out.Ptr<IdType>() != -1; } template bool CSRIsNonZero<kDLGPU, int32_t>(CSRMatrix, int64_t, int64_t); template bool CSRIsNonZero<kDLGPU, int64_t>(CSRMatrix, int64_t, int64_t); template <DLDeviceType XPU, typename IdType> NDArray CSRIsNonZero(CSRMatrix csr, NDArray row, NDArray col) { const auto rowlen = row->shape[0]; const auto collen = col->shape[0]; const auto rstlen = ::max(rowlen, collen); NDArray rst = NDArray::Empty({rstlen}, row->dtype, row->ctx); if (rstlen == 0) return rst; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(rstlen); const int nb = (rstlen + nt - 1) / nt; const IdType* data = nullptr; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(_LinearSearchKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), data, row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, rstlen, rst.Ptr<IdType>()); return rst != -1; } template NDArray CSRIsNonZero<kDLGPU, int32_t>(CSRMatrix, NDArray, NDArray); template NDArray CSRIsNonZero<kDLGPU, int64_t>(CSRMatrix, NDArray, NDArray); ///////////////////////////// CSRHasDuplicate ///////////////////////////// /*! 
* \brief Check whether each row does not have any duplicate entries. * Assume the CSR is sorted. */ template <typename IdType> __global__ void _SegmentHasNoDuplicate( const IdType* indptr, const IdType* indices, int64_t num_rows, int8_t* flags) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_rows) { bool f = true; for (IdType i = indptr[tx] + 1; f && i < indptr[tx + 1]; ++i) { f = (indices[i - 1] != indices[i]); } flags[tx] = static_cast<int8_t>(f); tx += stride_x; } } template <DLDeviceType XPU, typename IdType> bool CSRHasDuplicate(CSRMatrix csr) { if (!csr.sorted) csr = CSRSort(csr); const auto& ctx = csr.indptr->ctx; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of num_rows bytes. It wastes a little bit memory but should // be fine. int8_t* flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, csr.num_rows)); const int nt = cuda::FindNumThreads(csr.num_rows); const int nb = (csr.num_rows + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentHasNoDuplicate, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), csr.num_rows, flags); bool ret = cuda::AllTrue(flags, csr.num_rows, ctx); device->FreeWorkspace(ctx, flags); return !ret; } template bool CSRHasDuplicate<kDLGPU, int32_t>(CSRMatrix csr); template bool CSRHasDuplicate<kDLGPU, int64_t>(CSRMatrix csr); ///////////////////////////// CSRGetRowNNZ ///////////////////////////// template <DLDeviceType XPU, typename IdType> int64_t CSRGetRowNNZ(CSRMatrix csr, int64_t row) { const IdType cur = aten::IndexSelect<IdType>(csr.indptr, row); const IdType next = aten::IndexSelect<IdType>(csr.indptr, row + 1); return next - cur; } template int64_t CSRGetRowNNZ<kDLGPU, int32_t>(CSRMatrix, int64_t); template int64_t CSRGetRowNNZ<kDLGPU, int64_t>(CSRMatrix, int64_t); template <typename IdType> __global__ void _CSRGetRowNNZKernel( const IdType* vid, const IdType* indptr, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { const IdType vv = vid[tx]; out[tx] = indptr[vv + 1] - indptr[vv]; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowNNZ(CSRMatrix csr, NDArray rows) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto len = rows->shape[0]; const IdType* vid_data = static_cast<IdType*>(rows->data); const IdType* indptr_data = static_cast<IdType*>(csr.indptr->data); NDArray rst = NDArray::Empty({len}, rows->dtype, rows->ctx); IdType* rst_data = static_cast<IdType*>(rst->data); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_CSRGetRowNNZKernel, nb, nt, 0, thr_entry->stream, vid_data, indptr_data, rst_data, len); return rst; } template NDArray CSRGetRowNNZ<kDLGPU, int32_t>(CSRMatrix, NDArray); template NDArray CSRGetRowNNZ<kDLGPU, int64_t>(CSRMatrix, NDArray); ///////////////////////////// CSRGetRowColumnIndices ///////////////////////////// template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowColumnIndices(CSRMatrix csr, int64_t row) { const int64_t len = impl::CSRGetRowNNZ<XPU, IdType>(csr, row); const int64_t offset = aten::IndexSelect<IdType>(csr.indptr, row) * sizeof(IdType); return csr.indices.CreateView({len}, csr.indices->dtype, offset); } template NDArray CSRGetRowColumnIndices<kDLGPU, int32_t>(CSRMatrix, int64_t); template NDArray CSRGetRowColumnIndices<kDLGPU, int64_t>(CSRMatrix, 
int64_t); ///////////////////////////// CSRGetRowData ///////////////////////////// template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowData(CSRMatrix csr, int64_t row) { const int64_t len = impl::CSRGetRowNNZ<XPU, IdType>(csr, row); const int64_t offset = aten::IndexSelect<IdType>(csr.indptr, row) * sizeof(IdType); if (aten::CSRHasData(csr)) return csr.data.CreateView({len}, csr.data->dtype, offset); else return aten::Range(offset, offset + len, csr.indptr->dtype.bits, csr.indptr->ctx); } template NDArray CSRGetRowData<kDLGPU, int32_t>(CSRMatrix, int64_t); template NDArray CSRGetRowData<kDLGPU, int64_t>(CSRMatrix, int64_t); ///////////////////////////// CSRSliceRows ///////////////////////////// template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceRows(CSRMatrix csr, int64_t start, int64_t end) { const int64_t num_rows = end - start; const IdType st_pos = aten::IndexSelect<IdType>(csr.indptr, start); const IdType ed_pos = aten::IndexSelect<IdType>(csr.indptr, end); const IdType nnz = ed_pos - st_pos; IdArray ret_indptr = aten::IndexSelect(csr.indptr, start, end + 1) - st_pos; // indices and data can be view arrays IdArray ret_indices = csr.indices.CreateView( {nnz}, csr.indices->dtype, st_pos * sizeof(IdType)); IdArray ret_data; if (CSRHasData(csr)) ret_data = csr.data.CreateView({nnz}, csr.data->dtype, st_pos * sizeof(IdType)); else ret_data = aten::Range(st_pos, ed_pos, csr.indptr->dtype.bits, csr.indptr->ctx); return CSRMatrix(num_rows, csr.num_cols, ret_indptr, ret_indices, ret_data, csr.sorted); } template CSRMatrix CSRSliceRows<kDLGPU, int32_t>(CSRMatrix, int64_t, int64_t); template CSRMatrix CSRSliceRows<kDLGPU, int64_t>(CSRMatrix, int64_t, int64_t); /*! * \brief Copy data segment to output buffers * * For the i^th row r = row[i], copy the data from indptr[r] ~ indptr[r+1] * to the out_data from out_indptr[i] ~ out_indptr[i+1] * * If the provided `data` array is nullptr, write the read index to the out_data. * */ template <typename IdType, typename DType> __global__ void _SegmentCopyKernel( const IdType* indptr, const DType* data, const IdType* row, int64_t row_stride, int64_t length, const IdType* out_indptr, DType* out_data) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride; const IdType r = row[rpos]; DType* out_buf = out_data + out_indptr[tx]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { *(out_buf++) = data? data[i] : i; } tx += stride_x; } } template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceRows(CSRMatrix csr, NDArray rows) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int64_t len = rows->shape[0]; IdArray ret_indptr = aten::CumSum(aten::CSRGetRowNNZ(csr, rows), true); const int64_t nnz = aten::IndexSelect<IdType>(ret_indptr, len); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; // Copy indices. IdArray ret_indices = NDArray::Empty({nnz}, csr.indptr->dtype, csr.indptr->ctx); CUDA_KERNEL_CALL(_SegmentCopyKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), rows.Ptr<IdType>(), 1, len, ret_indptr.Ptr<IdType>(), ret_indices.Ptr<IdType>()); // Copy data. IdArray ret_data = NDArray::Empty({nnz}, csr.indptr->dtype, csr.indptr->ctx); CUDA_KERNEL_CALL(_SegmentCopyKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), CSRHasData(csr)? 
csr.data.Ptr<IdType>() : nullptr, rows.Ptr<IdType>(), 1, len, ret_indptr.Ptr<IdType>(), ret_data.Ptr<IdType>()); return CSRMatrix(len, csr.num_cols, ret_indptr, ret_indices, ret_data, csr.sorted); } template CSRMatrix CSRSliceRows<kDLGPU, int32_t>(CSRMatrix , NDArray); template CSRMatrix CSRSliceRows<kDLGPU, int64_t>(CSRMatrix , NDArray); ///////////////////////////// CSRGetData ///////////////////////////// template <DLDeviceType XPU, typename IdType> IdArray CSRGetData(CSRMatrix csr, NDArray row, NDArray col) { const int64_t rowlen = row->shape[0]; const int64_t collen = col->shape[0]; CHECK((rowlen == collen) || (rowlen == 1) || (collen == 1)) << "Invalid row and col id array."; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; const int64_t rstlen = ::max(rowlen, collen); IdArray rst = NDArray::Empty({rstlen}, row->dtype, row->ctx); if (rstlen == 0) return rst; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(rstlen); const int nb = (rstlen + nt - 1) / nt; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(_LinearSearchKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), CSRHasData(csr)? csr.data.Ptr<IdType>() : nullptr, row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, rstlen, rst.Ptr<IdType>()); return rst; } template NDArray CSRGetData<kDLGPU, int32_t>(CSRMatrix csr, NDArray rows, NDArray cols); template NDArray CSRGetData<kDLGPU, int64_t>(CSRMatrix csr, NDArray rows, NDArray cols); ///////////////////////////// CSRGetDataAndIndices ///////////////////////////// /*! * \brief Generate a 0-1 mask for each index that hits the provided (row, col) * index. * * Examples: * Given a CSR matrix (with duplicate entries) as follows: * [[0, 1, 2, 0, 0], * [1, 0, 0, 0, 0], * [0, 0, 1, 1, 0], * [0, 0, 0, 0, 0]] * Given rows: [0, 1], cols: [0, 2, 3] * The result mask is: [0, 1, 1, 1, 0, 0] */ template <typename IdType> __global__ void _SegmentMaskKernel( const IdType* indptr, const IdType* indices, const IdType* row, const IdType* col, int64_t row_stride, int64_t col_stride, int64_t length, IdType* mask) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride, cpos = tx * col_stride; const IdType r = row[rpos], c = col[cpos]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { if (indices[i] == c) { mask[i] = 1; } } tx += stride_x; } } /*! * \brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position * of each needle so that the insertion still gives sorted order. * * It essentially perform binary search to find lower bound for each needle * elements. Require the largest elements in the hay is larger than the given * needle elements. Commonly used in searching for row IDs of a given set of * coordinates. */ template <typename IdType> __global__ void _SortedSearchKernel( const IdType* hay, int64_t hay_size, const IdType* needles, int64_t num_needles, IdType* pos) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_needles) { const IdType ele = needles[tx]; // binary search IdType lo = 0, hi = hay_size - 1; while (lo < hi) { IdType mid = (lo + hi) >> 1; if (hay[mid] <= ele) { lo = mid + 1; } else { hi = mid; } } pos[tx] = (hay[hi] == ele)? 
hi : hi - 1; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> std::vector<NDArray> CSRGetDataAndIndices(CSRMatrix csr, NDArray row, NDArray col) { const auto rowlen = row->shape[0]; const auto collen = col->shape[0]; const auto len = ::max(rowlen, collen); if (len == 0) return {NullArray(), NullArray(), NullArray()}; const auto& ctx = row->ctx; const auto nbits = row->dtype.bits; const int64_t nnz = csr.indices->shape[0]; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // Generate a 0-1 mask for matched (row, col) positions. IdArray mask = Full(0, nnz, nbits, ctx); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentMaskKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, len, mask.Ptr<IdType>()); IdArray idx = AsNumBits(NonZero(mask), nbits); if (idx->shape[0] == 0) // No data. Return three empty arrays. return {idx, idx, idx}; // Search for row index IdArray ret_row = NewIdArray(idx->shape[0], ctx, nbits); const int nt2 = cuda::FindNumThreads(idx->shape[0]); const int nb2 = (idx->shape[0] + nt - 1) / nt; CUDA_KERNEL_CALL(_SortedSearchKernel, nb2, nt2, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.num_rows, idx.Ptr<IdType>(), idx->shape[0], ret_row.Ptr<IdType>()); // Column & data can be obtained by index select. IdArray ret_col = IndexSelect(csr.indices, idx); IdArray ret_data = CSRHasData(csr)? IndexSelect(csr.data, idx) : idx; return {ret_row, ret_col, ret_data}; } template std::vector<NDArray> CSRGetDataAndIndices<kDLGPU, int32_t>( CSRMatrix csr, NDArray rows, NDArray cols); template std::vector<NDArray> CSRGetDataAndIndices<kDLGPU, int64_t>( CSRMatrix csr, NDArray rows, NDArray cols); ///////////////////////////// CSRSliceMatrix ///////////////////////////// /*! * \brief Generate a 0-1 mask for each index whose column is in the provided set. * It also counts the number of masked values per row. */ template <typename IdType> __global__ void _SegmentMaskColKernel( const IdType* indptr, const IdType* indices, int64_t num_rows, const IdType* col, int64_t col_len, IdType* mask, IdType* count) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; // TODO(minjie): consider putting the col array in shared memory. 
while (tx < num_rows) { IdType cnt = 0; for (IdType i = indptr[tx]; i < indptr[tx + 1]; ++i) { const IdType cur_c = indices[i]; for (int64_t j = 0; j < col_len; ++j) { if (cur_c == col[j]) { mask[i] = 1; ++cnt; break; } } } count[tx] = cnt; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceMatrix(CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = rows->ctx; const auto& dtype = rows->dtype; const auto nbits = dtype.bits; const int64_t new_nrows = rows->shape[0]; const int64_t new_ncols = cols->shape[0]; if (new_nrows == 0 || new_ncols == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // First slice rows csr = CSRSliceRows(csr, rows); if (csr.indices->shape[0] == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // Generate a 0-1 mask for matched (row, col) positions. IdArray mask = Full(0, csr.indices->shape[0], nbits, ctx); // A count for how many masked values per row. IdArray count = NewIdArray(csr.num_rows, ctx, nbits); const int nt = cuda::FindNumThreads(csr.num_rows); const int nb = (csr.num_rows + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentMaskColKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), csr.num_rows, cols.Ptr<IdType>(), cols->shape[0], mask.Ptr<IdType>(), count.Ptr<IdType>()); IdArray idx = AsNumBits(NonZero(mask), nbits); if (idx->shape[0] == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // Indptr needs to be adjusted according to the new nnz per row. IdArray ret_indptr = CumSum(count, true); // Column & data can be obtained by index select. IdArray ret_col = IndexSelect(csr.indices, idx); IdArray ret_data = CSRHasData(csr)? IndexSelect(csr.data, idx) : idx; // Relabel column IdArray col_hash = NewIdArray(csr.num_cols, ctx, nbits); Scatter_(cols, Range(0, cols->shape[0], nbits, ctx), col_hash); ret_col = IndexSelect(col_hash, ret_col); return CSRMatrix(new_nrows, new_ncols, ret_indptr, ret_col, ret_data); } template CSRMatrix CSRSliceMatrix<kDLGPU, int32_t>( CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); template CSRMatrix CSRSliceMatrix<kDLGPU, int64_t>( CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); } // namespace impl } // namespace aten } // namespace dgl
0c055d2193236082d4745bf09a6b6ad64a563e1c.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/spmat_op_impl_csr.cu * \brief CSR operator CPU implementation */ #include <dgl/array.h> #include <vector> #include <unordered_set> #include <numeric> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { /*! * \brief Search adjacency list linearly for each (row, col) pair and * write the data under the matched position in the indices array to the output. * * If there is no match, -1 is written. * If there are multiple matches, only the first match is written. * If the given data array is null, write the matched position to the output. */ template <typename IdType> __global__ void _LinearSearchKernel( const IdType* indptr, const IdType* indices, const IdType* data, const IdType* row, const IdType* col, int64_t row_stride, int64_t col_stride, int64_t length, IdType* out) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride, cpos = tx * col_stride; IdType v = -1; const IdType r = row[rpos], c = col[cpos]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { if (indices[i] == c) { v = (data)? data[i] : i; break; } } out[tx] = v; tx += stride_x; } } ///////////////////////////// CSRIsNonZero ///////////////////////////// template <DLDeviceType XPU, typename IdType> bool CSRIsNonZero(CSRMatrix csr, int64_t row, int64_t col) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = csr.indptr->ctx; IdArray rows = aten::VecToIdArray<int64_t>({row}, sizeof(IdType) * 8, ctx); IdArray cols = aten::VecToIdArray<int64_t>({col}, sizeof(IdType) * 8, ctx); rows = rows.CopyTo(ctx); cols = cols.CopyTo(ctx); IdArray out = aten::NewIdArray(1, ctx, sizeof(IdType) * 8); const IdType* data = nullptr; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(_LinearSearchKernel, 1, 1, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), data, rows.Ptr<IdType>(), cols.Ptr<IdType>(), 1, 1, 1, out.Ptr<IdType>()); out = out.CopyTo(DLContext{kDLCPU, 0}); return *out.Ptr<IdType>() != -1; } template bool CSRIsNonZero<kDLGPU, int32_t>(CSRMatrix, int64_t, int64_t); template bool CSRIsNonZero<kDLGPU, int64_t>(CSRMatrix, int64_t, int64_t); template <DLDeviceType XPU, typename IdType> NDArray CSRIsNonZero(CSRMatrix csr, NDArray row, NDArray col) { const auto rowlen = row->shape[0]; const auto collen = col->shape[0]; const auto rstlen = std::max(rowlen, collen); NDArray rst = NDArray::Empty({rstlen}, row->dtype, row->ctx); if (rstlen == 0) return rst; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(rstlen); const int nb = (rstlen + nt - 1) / nt; const IdType* data = nullptr; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(_LinearSearchKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), data, row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, rstlen, rst.Ptr<IdType>()); return rst != -1; } template NDArray CSRIsNonZero<kDLGPU, int32_t>(CSRMatrix, NDArray, NDArray); template NDArray CSRIsNonZero<kDLGPU, int64_t>(CSRMatrix, NDArray, NDArray); ///////////////////////////// CSRHasDuplicate ///////////////////////////// /*! * \brief Check whether each row does not have any duplicate entries. 
* Assume the CSR is sorted. */ template <typename IdType> __global__ void _SegmentHasNoDuplicate( const IdType* indptr, const IdType* indices, int64_t num_rows, int8_t* flags) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_rows) { bool f = true; for (IdType i = indptr[tx] + 1; f && i < indptr[tx + 1]; ++i) { f = (indices[i - 1] != indices[i]); } flags[tx] = static_cast<int8_t>(f); tx += stride_x; } } template <DLDeviceType XPU, typename IdType> bool CSRHasDuplicate(CSRMatrix csr) { if (!csr.sorted) csr = CSRSort(csr); const auto& ctx = csr.indptr->ctx; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of num_rows bytes. It wastes a little bit memory but should // be fine. int8_t* flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, csr.num_rows)); const int nt = cuda::FindNumThreads(csr.num_rows); const int nb = (csr.num_rows + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentHasNoDuplicate, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), csr.num_rows, flags); bool ret = cuda::AllTrue(flags, csr.num_rows, ctx); device->FreeWorkspace(ctx, flags); return !ret; } template bool CSRHasDuplicate<kDLGPU, int32_t>(CSRMatrix csr); template bool CSRHasDuplicate<kDLGPU, int64_t>(CSRMatrix csr); ///////////////////////////// CSRGetRowNNZ ///////////////////////////// template <DLDeviceType XPU, typename IdType> int64_t CSRGetRowNNZ(CSRMatrix csr, int64_t row) { const IdType cur = aten::IndexSelect<IdType>(csr.indptr, row); const IdType next = aten::IndexSelect<IdType>(csr.indptr, row + 1); return next - cur; } template int64_t CSRGetRowNNZ<kDLGPU, int32_t>(CSRMatrix, int64_t); template int64_t CSRGetRowNNZ<kDLGPU, int64_t>(CSRMatrix, int64_t); template <typename IdType> __global__ void _CSRGetRowNNZKernel( const IdType* vid, const IdType* indptr, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { const IdType vv = vid[tx]; out[tx] = indptr[vv + 1] - indptr[vv]; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowNNZ(CSRMatrix csr, NDArray rows) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto len = rows->shape[0]; const IdType* vid_data = static_cast<IdType*>(rows->data); const IdType* indptr_data = static_cast<IdType*>(csr.indptr->data); NDArray rst = NDArray::Empty({len}, rows->dtype, rows->ctx); IdType* rst_data = static_cast<IdType*>(rst->data); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_CSRGetRowNNZKernel, nb, nt, 0, thr_entry->stream, vid_data, indptr_data, rst_data, len); return rst; } template NDArray CSRGetRowNNZ<kDLGPU, int32_t>(CSRMatrix, NDArray); template NDArray CSRGetRowNNZ<kDLGPU, int64_t>(CSRMatrix, NDArray); ///////////////////////////// CSRGetRowColumnIndices ///////////////////////////// template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowColumnIndices(CSRMatrix csr, int64_t row) { const int64_t len = impl::CSRGetRowNNZ<XPU, IdType>(csr, row); const int64_t offset = aten::IndexSelect<IdType>(csr.indptr, row) * sizeof(IdType); return csr.indices.CreateView({len}, csr.indices->dtype, offset); } template NDArray CSRGetRowColumnIndices<kDLGPU, int32_t>(CSRMatrix, int64_t); template NDArray CSRGetRowColumnIndices<kDLGPU, int64_t>(CSRMatrix, int64_t); ///////////////////////////// CSRGetRowData 
///////////////////////////// template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowData(CSRMatrix csr, int64_t row) { const int64_t len = impl::CSRGetRowNNZ<XPU, IdType>(csr, row); const int64_t offset = aten::IndexSelect<IdType>(csr.indptr, row) * sizeof(IdType); if (aten::CSRHasData(csr)) return csr.data.CreateView({len}, csr.data->dtype, offset); else return aten::Range(offset, offset + len, csr.indptr->dtype.bits, csr.indptr->ctx); } template NDArray CSRGetRowData<kDLGPU, int32_t>(CSRMatrix, int64_t); template NDArray CSRGetRowData<kDLGPU, int64_t>(CSRMatrix, int64_t); ///////////////////////////// CSRSliceRows ///////////////////////////// template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceRows(CSRMatrix csr, int64_t start, int64_t end) { const int64_t num_rows = end - start; const IdType st_pos = aten::IndexSelect<IdType>(csr.indptr, start); const IdType ed_pos = aten::IndexSelect<IdType>(csr.indptr, end); const IdType nnz = ed_pos - st_pos; IdArray ret_indptr = aten::IndexSelect(csr.indptr, start, end + 1) - st_pos; // indices and data can be view arrays IdArray ret_indices = csr.indices.CreateView( {nnz}, csr.indices->dtype, st_pos * sizeof(IdType)); IdArray ret_data; if (CSRHasData(csr)) ret_data = csr.data.CreateView({nnz}, csr.data->dtype, st_pos * sizeof(IdType)); else ret_data = aten::Range(st_pos, ed_pos, csr.indptr->dtype.bits, csr.indptr->ctx); return CSRMatrix(num_rows, csr.num_cols, ret_indptr, ret_indices, ret_data, csr.sorted); } template CSRMatrix CSRSliceRows<kDLGPU, int32_t>(CSRMatrix, int64_t, int64_t); template CSRMatrix CSRSliceRows<kDLGPU, int64_t>(CSRMatrix, int64_t, int64_t); /*! * \brief Copy data segment to output buffers * * For the i^th row r = row[i], copy the data from indptr[r] ~ indptr[r+1] * to the out_data from out_indptr[i] ~ out_indptr[i+1] * * If the provided `data` array is nullptr, write the read index to the out_data. * */ template <typename IdType, typename DType> __global__ void _SegmentCopyKernel( const IdType* indptr, const DType* data, const IdType* row, int64_t row_stride, int64_t length, const IdType* out_indptr, DType* out_data) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride; const IdType r = row[rpos]; DType* out_buf = out_data + out_indptr[tx]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { *(out_buf++) = data? data[i] : i; } tx += stride_x; } } template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceRows(CSRMatrix csr, NDArray rows) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int64_t len = rows->shape[0]; IdArray ret_indptr = aten::CumSum(aten::CSRGetRowNNZ(csr, rows), true); const int64_t nnz = aten::IndexSelect<IdType>(ret_indptr, len); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; // Copy indices. IdArray ret_indices = NDArray::Empty({nnz}, csr.indptr->dtype, csr.indptr->ctx); CUDA_KERNEL_CALL(_SegmentCopyKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), rows.Ptr<IdType>(), 1, len, ret_indptr.Ptr<IdType>(), ret_indices.Ptr<IdType>()); // Copy data. IdArray ret_data = NDArray::Empty({nnz}, csr.indptr->dtype, csr.indptr->ctx); CUDA_KERNEL_CALL(_SegmentCopyKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), CSRHasData(csr)? 
csr.data.Ptr<IdType>() : nullptr, rows.Ptr<IdType>(), 1, len, ret_indptr.Ptr<IdType>(), ret_data.Ptr<IdType>()); return CSRMatrix(len, csr.num_cols, ret_indptr, ret_indices, ret_data, csr.sorted); } template CSRMatrix CSRSliceRows<kDLGPU, int32_t>(CSRMatrix , NDArray); template CSRMatrix CSRSliceRows<kDLGPU, int64_t>(CSRMatrix , NDArray); ///////////////////////////// CSRGetData ///////////////////////////// template <DLDeviceType XPU, typename IdType> IdArray CSRGetData(CSRMatrix csr, NDArray row, NDArray col) { const int64_t rowlen = row->shape[0]; const int64_t collen = col->shape[0]; CHECK((rowlen == collen) || (rowlen == 1) || (collen == 1)) << "Invalid row and col id array."; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; const int64_t rstlen = std::max(rowlen, collen); IdArray rst = NDArray::Empty({rstlen}, row->dtype, row->ctx); if (rstlen == 0) return rst; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(rstlen); const int nb = (rstlen + nt - 1) / nt; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(_LinearSearchKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), CSRHasData(csr)? csr.data.Ptr<IdType>() : nullptr, row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, rstlen, rst.Ptr<IdType>()); return rst; } template NDArray CSRGetData<kDLGPU, int32_t>(CSRMatrix csr, NDArray rows, NDArray cols); template NDArray CSRGetData<kDLGPU, int64_t>(CSRMatrix csr, NDArray rows, NDArray cols); ///////////////////////////// CSRGetDataAndIndices ///////////////////////////// /*! * \brief Generate a 0-1 mask for each index that hits the provided (row, col) * index. * * Examples: * Given a CSR matrix (with duplicate entries) as follows: * [[0, 1, 2, 0, 0], * [1, 0, 0, 0, 0], * [0, 0, 1, 1, 0], * [0, 0, 0, 0, 0]] * Given rows: [0, 1], cols: [0, 2, 3] * The result mask is: [0, 1, 1, 1, 0, 0] */ template <typename IdType> __global__ void _SegmentMaskKernel( const IdType* indptr, const IdType* indices, const IdType* row, const IdType* col, int64_t row_stride, int64_t col_stride, int64_t length, IdType* mask) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride, cpos = tx * col_stride; const IdType r = row[rpos], c = col[cpos]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { if (indices[i] == c) { mask[i] = 1; } } tx += stride_x; } } /*! * \brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position * of each needle so that the insertion still gives sorted order. * * It essentially perform binary search to find lower bound for each needle * elements. Require the largest elements in the hay is larger than the given * needle elements. Commonly used in searching for row IDs of a given set of * coordinates. */ template <typename IdType> __global__ void _SortedSearchKernel( const IdType* hay, int64_t hay_size, const IdType* needles, int64_t num_needles, IdType* pos) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_needles) { const IdType ele = needles[tx]; // binary search IdType lo = 0, hi = hay_size - 1; while (lo < hi) { IdType mid = (lo + hi) >> 1; if (hay[mid] <= ele) { lo = mid + 1; } else { hi = mid; } } pos[tx] = (hay[hi] == ele)? 
hi : hi - 1; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> std::vector<NDArray> CSRGetDataAndIndices(CSRMatrix csr, NDArray row, NDArray col) { const auto rowlen = row->shape[0]; const auto collen = col->shape[0]; const auto len = std::max(rowlen, collen); if (len == 0) return {NullArray(), NullArray(), NullArray()}; const auto& ctx = row->ctx; const auto nbits = row->dtype.bits; const int64_t nnz = csr.indices->shape[0]; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // Generate a 0-1 mask for matched (row, col) positions. IdArray mask = Full(0, nnz, nbits, ctx); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentMaskKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, len, mask.Ptr<IdType>()); IdArray idx = AsNumBits(NonZero(mask), nbits); if (idx->shape[0] == 0) // No data. Return three empty arrays. return {idx, idx, idx}; // Search for row index IdArray ret_row = NewIdArray(idx->shape[0], ctx, nbits); const int nt2 = cuda::FindNumThreads(idx->shape[0]); const int nb2 = (idx->shape[0] + nt - 1) / nt; CUDA_KERNEL_CALL(_SortedSearchKernel, nb2, nt2, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.num_rows, idx.Ptr<IdType>(), idx->shape[0], ret_row.Ptr<IdType>()); // Column & data can be obtained by index select. IdArray ret_col = IndexSelect(csr.indices, idx); IdArray ret_data = CSRHasData(csr)? IndexSelect(csr.data, idx) : idx; return {ret_row, ret_col, ret_data}; } template std::vector<NDArray> CSRGetDataAndIndices<kDLGPU, int32_t>( CSRMatrix csr, NDArray rows, NDArray cols); template std::vector<NDArray> CSRGetDataAndIndices<kDLGPU, int64_t>( CSRMatrix csr, NDArray rows, NDArray cols); ///////////////////////////// CSRSliceMatrix ///////////////////////////// /*! * \brief Generate a 0-1 mask for each index whose column is in the provided set. * It also counts the number of masked values per row. */ template <typename IdType> __global__ void _SegmentMaskColKernel( const IdType* indptr, const IdType* indices, int64_t num_rows, const IdType* col, int64_t col_len, IdType* mask, IdType* count) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; // TODO(minjie): consider putting the col array in shared memory. 
while (tx < num_rows) { IdType cnt = 0; for (IdType i = indptr[tx]; i < indptr[tx + 1]; ++i) { const IdType cur_c = indices[i]; for (int64_t j = 0; j < col_len; ++j) { if (cur_c == col[j]) { mask[i] = 1; ++cnt; break; } } } count[tx] = cnt; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceMatrix(CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = rows->ctx; const auto& dtype = rows->dtype; const auto nbits = dtype.bits; const int64_t new_nrows = rows->shape[0]; const int64_t new_ncols = cols->shape[0]; if (new_nrows == 0 || new_ncols == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // First slice rows csr = CSRSliceRows(csr, rows); if (csr.indices->shape[0] == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // Generate a 0-1 mask for matched (row, col) positions. IdArray mask = Full(0, csr.indices->shape[0], nbits, ctx); // A count for how many masked values per row. IdArray count = NewIdArray(csr.num_rows, ctx, nbits); const int nt = cuda::FindNumThreads(csr.num_rows); const int nb = (csr.num_rows + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentMaskColKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), csr.num_rows, cols.Ptr<IdType>(), cols->shape[0], mask.Ptr<IdType>(), count.Ptr<IdType>()); IdArray idx = AsNumBits(NonZero(mask), nbits); if (idx->shape[0] == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // Indptr needs to be adjusted according to the new nnz per row. IdArray ret_indptr = CumSum(count, true); // Column & data can be obtained by index select. IdArray ret_col = IndexSelect(csr.indices, idx); IdArray ret_data = CSRHasData(csr)? IndexSelect(csr.data, idx) : idx; // Relabel column IdArray col_hash = NewIdArray(csr.num_cols, ctx, nbits); Scatter_(cols, Range(0, cols->shape[0], nbits, ctx), col_hash); ret_col = IndexSelect(col_hash, ret_col); return CSRMatrix(new_nrows, new_ncols, ret_indptr, ret_col, ret_data); } template CSRMatrix CSRSliceMatrix<kDLGPU, int32_t>( CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); template CSRMatrix CSRSliceMatrix<kDLGPU, int64_t>( CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); } // namespace impl } // namespace aten } // namespace dgl
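Several kernels in the file above rely on two recurring patterns: a grid-stride loop over work items and a binary search against a sorted array (CSR indptr) to recover the owning row of a flat nonzero position, as in _SortedSearchKernel. The standalone sketch below (not DGL code; array and kernel names are illustrative) shows the same idea with plain int arrays, where indptr has num_rows + 1 entries and the answer is the last row r with indptr[r] <= p.

__global__ void positions_to_rows(const int* indptr, int num_rows,
                                  const int* pos, int num_pos, int* row_out) {
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    for (; tx < num_pos; tx += stride) {       // grid-stride loop, as in the kernels above
        const int p = pos[tx];
        int lo = 0, hi = num_rows;             // search over indptr[0..num_rows]
        while (lo < hi) {                      // first index with indptr[idx] > p
            int mid = (lo + hi) >> 1;
            if (indptr[mid] <= p) lo = mid + 1; else hi = mid;
        }
        row_out[tx] = lo - 1;                  // last row whose start offset is <= p
    }
}

Because indptr[0] == 0 and indptr[num_rows] equals the total number of nonzeros, lo always lands in [1, num_rows] for a valid position, so lo - 1 is a valid row index.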
6c6781387a8ef44ec8dd919359b7d137f8fd7b0b.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] return ((coord + 1.f) * size - 1) / 2; } } static __forceinline__ __device__ bool within_bounds(int h, int H) { return h >= 0 && h < H; } // Clips coordinates to between 0 and clip_limit - 1 template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates(scalar_t in, int clip_limit) { return ::min(static_cast<scalar_t>(clip_limit - 1), ::max(in, static_cast<scalar_t>(0))); } // Reflects coordinates until they fall between low and high (inclusive). // The bounds are passed as twice their value so that half-integer values // can be represented as ints. template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<scalar_t>(0); } scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = ::fabs(in - min); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. scalar_t extra = ::fmod(in, span); int flips = static_cast<int>(::floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } template<typename scalar_t> static __forceinline__ __device__ scalar_t safe_downgrade_to_int_range(scalar_t x){ // -100.0 does not have special meaning. This is just to make sure // it's not within_bounds_2d or within_bounds_3d, and does not cause // undefined behavior. See #35506. if (x > INT_MAX-1 || x < INT_MIN || !::isfinite(static_cast<double>(x))) return static_cast<scalar_t>(-100.0); return x; } template<typename scalar_t> static __forceinline__ __device__ scalar_t compute_coordinates(scalar_t coord, int size, bool padding_mode, bool align_corners) { if (padding_mode) { // True for border padding // clip coordinates to image borders coord = clip_coordinates(coord, size); } coord = safe_downgrade_to_int_range(coord); return coord; } // Computes the pixel source index value for a grid coordinate template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index( scalar_t coord, int size, bool padding_mode, bool align_corners) { coord = grid_sampler_unnormalize(coord, size, align_corners); coord = compute_coordinates(coord, size, padding_mode, align_corners); return coord; } template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int size, bool align_corners, scalar_t *grad_in) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] *grad_in = static_cast<scalar_t>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] *grad_in = static_cast<scalar_t>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) { // Note that it is important for the gradient calculation that borders // are considered out of bounds. 
if (in <= static_cast<scalar_t>(0)) { *grad_in = static_cast<scalar_t>(0); return static_cast<scalar_t>(0); } else { scalar_t max = static_cast<scalar_t>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index_set_grad( scalar_t coord, int size, bool padding_mode, bool align_corners, scalar_t *grad_in) { scalar_t grad_clip, grad_refl; coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); if (padding_mode) { // true for border padding // clip coordinates to image borders coord = clip_coordinates_set_grad(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } coord = safe_downgrade_to_int_range(coord); return coord; } template <typename scalar_t> __global__ void grid_sample1d_cuda_forward_kernel( const scalar_t* __restrict__ input, const scalar_t* __restrict__ grid, scalar_t* __restrict__ output, bool padding_mode, bool align_corners, const int N, const int L_in, const int batch_size, const int C, const int L_out, const int L_tgt ) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ const int l = index % L_out; const int c = (index/L_out) % C; const int l_t = index / (C * L_out) % L_tgt; const int n = index / (C * L_out * L_tgt); const int grid_offset = n * L_out * L_tgt + l_t * L_out+ l; scalar_t x = grid[grid_offset]; scalar_t ix = grid_sampler_compute_source_index(x, L_in, padding_mode, align_corners); const int index_left = ::floor(ix); const int index_right = index_left + 1; // const int output_offset = l + c * L_out + n * C * L_out; const int output_offset = l + c * L_out + l_t * C * L_out + n * L_tgt * C * L_out; scalar_t surface_left = index_right-ix; scalar_t surface_right = ix-index_left; const int input_left_offset = index_left + c * L_in + n * L_in * C; const int input_right_offset = index_right + c * L_in + n * L_in * C; output[output_offset] = static_cast<scalar_t>(0); if(within_bounds(index_left, L_in)){ output[output_offset] += input[input_left_offset] * surface_left; } if(within_bounds(index_right, L_in)){ output[output_offset] += input[input_right_offset] * surface_right; } // output[output_offset] = (ix-index_left) * (input[input_right_offset] - input[input_left_offset]) + input[input_left_offset]; } } template <typename scalar_t> __global__ void grid_sample1d_cuda_backward_kernel( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ grid, scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_grid, bool padding_mode, bool align_corners, const int N, const int L_in, const int batch_size, const int C, const int L_out, const int L_tgt ) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ const int l = index % L_out; const int l_t = (index / L_out) % L_tgt; const int n = index / (L_out * L_tgt); const int grid_offset = n * L_out * L_tgt + l_t * L_out + l; // scalar_t x = grid[grid_offset]; scalar_t gix_mult; scalar_t ix = grid_sampler_compute_source_index_set_grad(x, L_in, padding_mode, align_corners, &gix_mult); // const int index_left = ::floor(ix); const int index_right = index_left + 1; scalar_t surface_left = index_right-ix; scalar_t surface_right = ix-index_left; scalar_t iy = static_cast<scalar_t>(0); scalar_t iy_se = static_cast<scalar_t>(1); scalar_t gix = static_cast<scalar_t>(0); for(int c=0; c<C;++c){ const int output_offset = l + c 
* L_out + l_t * C * L_out + n * L_tgt * C * L_out; const int grad_output_offset = l + c * L_out + l_t * C * L_out + n * L_tgt * C * L_out; const int input_left_offset = index_left + c * L_in + n * L_in * C; const int input_right_offset = index_right + c * L_in + n * L_in * C; scalar_t gOut = grad_output[grad_output_offset]; if (within_bounds(index_left, L_in)) { atomicAdd(grad_input + input_left_offset, surface_left * gOut); } if(within_bounds(index_right, L_in)){ atomicAdd(grad_input + input_right_offset, surface_right * gOut); } if (within_bounds(index_left, L_in)) { // order is important // gix -= surface_left * input[input_left_offset] * gOut; gix -= input[input_left_offset] * (iy_se-iy) * gOut; } if(within_bounds(index_right, L_in)){ // gix += surface_right * input[input_right_offset] * gOut; gix += input[input_right_offset] * (iy_se-iy) * gOut; } } grad_grid[grid_offset] = gix*gix_mult; } } } torch::Tensor grid_sample1d_cuda_forward( torch::Tensor input, torch::Tensor grid, bool padding_mode, bool align_corners) { // const auto batch_size = input.size(0); const auto C = input.size(1); const auto L_in = input.size(2); const auto L_tgt = grid.size(1); const auto L_out = grid.size(2); torch::Tensor output = torch::zeros({batch_size,L_tgt, C, L_out}, input.options()); const int threads = 1024; // const dim3 blocks((C*L_out + threads - 1) / threads, batch_size); const int N = C*L_out*batch_size*L_tgt; const int blocks = (N + threads-1)/ threads; AT_DISPATCH_FLOATING_TYPES(input.type(), "lltm_forward_cuda", ([&] { hipLaunchKernelGGL(( grid_sample1d_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, input.data<scalar_t>(), grid.data<scalar_t>(), output.data<scalar_t>(), padding_mode, align_corners, N, L_in, batch_size, C, L_out, L_tgt ); })); return output; } std::vector<torch::Tensor> grid_sample1d_cuda_backward( torch::Tensor grad_output, torch::Tensor input, torch::Tensor grid, bool padding_mode, bool align_corners) { const auto batch_size = input.size(0); const auto C = input.size(1); const auto L_in = input.size(2); const auto L_tgt = grid.size(1); const auto L_out = grid.size(2); torch::Tensor grad_input = torch::zeros_like(input); torch::Tensor grad_grid = torch::zeros_like(grid); const int threads = 1024; const int N = L_out * L_tgt * batch_size; const int blocks = (N + threads-1)/ threads; AT_DISPATCH_FLOATING_TYPES(input.type(), "grid_sample1d_backward_cuda", ([&] { hipLaunchKernelGGL(( grid_sample1d_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_output.data<scalar_t>(), input.data<scalar_t>(), grid.data<scalar_t>(), grad_input.data<scalar_t>(), grad_grid.data<scalar_t>(), padding_mode, align_corners, N, L_in, batch_size, C, L_out, L_tgt ); })); return {grad_input, grad_grid}; }
6c6781387a8ef44ec8dd919359b7d137f8fd7b0b.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] return ((coord + 1.f) * size - 1) / 2; } } static __forceinline__ __device__ bool within_bounds(int h, int H) { return h >= 0 && h < H; } // Clips coordinates to between 0 and clip_limit - 1 template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates(scalar_t in, int clip_limit) { return ::min(static_cast<scalar_t>(clip_limit - 1), ::max(in, static_cast<scalar_t>(0))); } // Reflects coordinates until they fall between low and high (inclusive). // The bounds are passed as twice their value so that half-integer values // can be represented as ints. template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<scalar_t>(0); } scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = ::fabs(in - min); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. scalar_t extra = ::fmod(in, span); int flips = static_cast<int>(::floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } template<typename scalar_t> static __forceinline__ __device__ scalar_t safe_downgrade_to_int_range(scalar_t x){ // -100.0 does not have special meaning. This is just to make sure // it's not within_bounds_2d or within_bounds_3d, and does not cause // undefined behavior. See #35506. if (x > INT_MAX-1 || x < INT_MIN || !::isfinite(static_cast<double>(x))) return static_cast<scalar_t>(-100.0); return x; } template<typename scalar_t> static __forceinline__ __device__ scalar_t compute_coordinates(scalar_t coord, int size, bool padding_mode, bool align_corners) { if (padding_mode) { // True for border padding // clip coordinates to image borders coord = clip_coordinates(coord, size); } coord = safe_downgrade_to_int_range(coord); return coord; } // Computes the pixel source index value for a grid coordinate template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index( scalar_t coord, int size, bool padding_mode, bool align_corners) { coord = grid_sampler_unnormalize(coord, size, align_corners); coord = compute_coordinates(coord, size, padding_mode, align_corners); return coord; } template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int size, bool align_corners, scalar_t *grad_in) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] *grad_in = static_cast<scalar_t>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] *grad_in = static_cast<scalar_t>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) { // Note that it is important for the gradient calculation that borders // are considered out of bounds. 
if (in <= static_cast<scalar_t>(0)) { *grad_in = static_cast<scalar_t>(0); return static_cast<scalar_t>(0); } else { scalar_t max = static_cast<scalar_t>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index_set_grad( scalar_t coord, int size, bool padding_mode, bool align_corners, scalar_t *grad_in) { scalar_t grad_clip, grad_refl; coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); if (padding_mode) { // true for border padding // clip coordinates to image borders coord = clip_coordinates_set_grad(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } coord = safe_downgrade_to_int_range(coord); return coord; } template <typename scalar_t> __global__ void grid_sample1d_cuda_forward_kernel( const scalar_t* __restrict__ input, const scalar_t* __restrict__ grid, scalar_t* __restrict__ output, bool padding_mode, bool align_corners, const int N, const int L_in, const int batch_size, const int C, const int L_out, const int L_tgt ) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ const int l = index % L_out; const int c = (index/L_out) % C; const int l_t = index / (C * L_out) % L_tgt; const int n = index / (C * L_out * L_tgt); const int grid_offset = n * L_out * L_tgt + l_t * L_out+ l; scalar_t x = grid[grid_offset]; scalar_t ix = grid_sampler_compute_source_index(x, L_in, padding_mode, align_corners); const int index_left = ::floor(ix); const int index_right = index_left + 1; // const int output_offset = l + c * L_out + n * C * L_out; const int output_offset = l + c * L_out + l_t * C * L_out + n * L_tgt * C * L_out; scalar_t surface_left = index_right-ix; scalar_t surface_right = ix-index_left; const int input_left_offset = index_left + c * L_in + n * L_in * C; const int input_right_offset = index_right + c * L_in + n * L_in * C; output[output_offset] = static_cast<scalar_t>(0); if(within_bounds(index_left, L_in)){ output[output_offset] += input[input_left_offset] * surface_left; } if(within_bounds(index_right, L_in)){ output[output_offset] += input[input_right_offset] * surface_right; } // output[output_offset] = (ix-index_left) * (input[input_right_offset] - input[input_left_offset]) + input[input_left_offset]; } } template <typename scalar_t> __global__ void grid_sample1d_cuda_backward_kernel( const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ grid, scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_grid, bool padding_mode, bool align_corners, const int N, const int L_in, const int batch_size, const int C, const int L_out, const int L_tgt ) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ const int l = index % L_out; const int l_t = (index / L_out) % L_tgt; const int n = index / (L_out * L_tgt); const int grid_offset = n * L_out * L_tgt + l_t * L_out + l; // scalar_t x = grid[grid_offset]; scalar_t gix_mult; scalar_t ix = grid_sampler_compute_source_index_set_grad(x, L_in, padding_mode, align_corners, &gix_mult); // const int index_left = ::floor(ix); const int index_right = index_left + 1; scalar_t surface_left = index_right-ix; scalar_t surface_right = ix-index_left; scalar_t iy = static_cast<scalar_t>(0); scalar_t iy_se = static_cast<scalar_t>(1); scalar_t gix = static_cast<scalar_t>(0); for(int c=0; c<C;++c){ const int output_offset = l + c 
* L_out + l_t * C * L_out + n * L_tgt * C * L_out; const int grad_output_offset = l + c * L_out + l_t * C * L_out + n * L_tgt * C * L_out; const int input_left_offset = index_left + c * L_in + n * L_in * C; const int input_right_offset = index_right + c * L_in + n * L_in * C; scalar_t gOut = grad_output[grad_output_offset]; if (within_bounds(index_left, L_in)) { atomicAdd(grad_input + input_left_offset, surface_left * gOut); } if(within_bounds(index_right, L_in)){ atomicAdd(grad_input + input_right_offset, surface_right * gOut); } if (within_bounds(index_left, L_in)) { // order is important // gix -= surface_left * input[input_left_offset] * gOut; gix -= input[input_left_offset] * (iy_se-iy) * gOut; } if(within_bounds(index_right, L_in)){ // gix += surface_right * input[input_right_offset] * gOut; gix += input[input_right_offset] * (iy_se-iy) * gOut; } } grad_grid[grid_offset] = gix*gix_mult; } } } torch::Tensor grid_sample1d_cuda_forward( torch::Tensor input, torch::Tensor grid, bool padding_mode, bool align_corners) { // const auto batch_size = input.size(0); const auto C = input.size(1); const auto L_in = input.size(2); const auto L_tgt = grid.size(1); const auto L_out = grid.size(2); torch::Tensor output = torch::zeros({batch_size,L_tgt, C, L_out}, input.options()); const int threads = 1024; // const dim3 blocks((C*L_out + threads - 1) / threads, batch_size); const int N = C*L_out*batch_size*L_tgt; const int blocks = (N + threads-1)/ threads; AT_DISPATCH_FLOATING_TYPES(input.type(), "lltm_forward_cuda", ([&] { grid_sample1d_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( input.data<scalar_t>(), grid.data<scalar_t>(), output.data<scalar_t>(), padding_mode, align_corners, N, L_in, batch_size, C, L_out, L_tgt ); })); return output; } std::vector<torch::Tensor> grid_sample1d_cuda_backward( torch::Tensor grad_output, torch::Tensor input, torch::Tensor grid, bool padding_mode, bool align_corners) { const auto batch_size = input.size(0); const auto C = input.size(1); const auto L_in = input.size(2); const auto L_tgt = grid.size(1); const auto L_out = grid.size(2); torch::Tensor grad_input = torch::zeros_like(input); torch::Tensor grad_grid = torch::zeros_like(grid); const int threads = 1024; const int N = L_out * L_tgt * batch_size; const int blocks = (N + threads-1)/ threads; AT_DISPATCH_FLOATING_TYPES(input.type(), "grid_sample1d_backward_cuda", ([&] { grid_sample1d_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( grad_output.data<scalar_t>(), input.data<scalar_t>(), grid.data<scalar_t>(), grad_input.data<scalar_t>(), grad_grid.data<scalar_t>(), padding_mode, align_corners, N, L_in, batch_size, C, L_out, L_tgt ); })); return {grad_input, grad_grid}; }
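The forward kernel above is ordinary 1-D linear interpolation after unnormalizing the grid coordinate; surface_left and surface_right are the neighbour weights. A small host-side reference (a sketch only, not part of the extension; function and parameter names are illustrative, and pad_value = 0 mirrors the kernel's behaviour of skipping out-of-bounds neighbours) makes the math explicit for a single sample:

#include <cmath>

float grid_sample1d_ref(const float* line, int L_in, float coord,
                        bool align_corners, float pad_value) {
    // Unnormalize coord from [-1, 1] into input index space.
    float ix = align_corners ? (coord + 1.f) * 0.5f * (L_in - 1)
                             : ((coord + 1.f) * L_in - 1.f) * 0.5f;
    int i0 = (int)std::floor(ix);
    int i1 = i0 + 1;
    float w1 = ix - i0;                                         // weight of the right neighbour
    float w0 = 1.f - w1;                                        // weight of the left neighbour
    float v0 = (i0 >= 0 && i0 < L_in) ? line[i0] : pad_value;
    float v1 = (i1 >= 0 && i1 < L_in) ? line[i1] : pad_value;
    return w0 * v0 + w1 * v1;
}

With border padding the kernel instead clips ix into [0, L_in - 1] before taking neighbours, which is what grid_sampler_compute_source_index does when padding_mode is true.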
e19460b6755c2cba04d0b53e323ee463647b3829.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> #include <iostream> #include <time.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include "setup.h" struct Parameters { int screenW; int screenH; int max_ray_depth; ///< maximum allowed ray depth (applies to all rays) int ns_aa; ///< number of camera rays in one pixel (along one axis) int ns_area_light; ///< number samples per area light source int lightNum; int primNum; int* types; int* bsdfIndexes; float* positions; float* normals; float4* woopPositions; float3* camOffset; float* frameBuffer; int* BVHPrimMap; GPUBVHNode* BVHRoot; }; struct BVHParameters { float sceneMin[3]; float sceneExtent[3]; int numObjects; GPUBVHNode *leafNodes; GPUBVHNode *internalNodes; unsigned int*sortedMortonCodes; int *sortedObjectIDs; int *types; float *positions; }; //#define PARALLEL_BUILD_BVH #define TILE_DIM 1 #include "kernel.hip" #include <map> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. 
*/ //using namespace std; float3* gpu_camOffset; float4* gpu_woopPositions; CUDAPathTracer::CUDAPathTracer(PathTracer* _pathTracer) { pathTracer = _pathTracer; } CUDAPathTracer::~CUDAPathTracer() { hipFree(gpu_types); hipFree(gpu_bsdfIndexes); hipFree(gpu_positions); hipFree(gpu_normals); hipFree(gpu_woopPositions); hipFree(frameBuffer); hipFree(BVHPrimMap); #ifdef PARALLEL_BUILD_BVH // hipFree(gpu_sortedMortonCodes); free at the end of setup hipFree(gpu_leafNodes); hipFree(gpu_internalNodes); #else freeBVHNode(BVHRoot); #endif } void CUDAPathTracer::startRayTracing() { int xTileNum = TILE_DIM; int yTileNum = TILE_DIM; int width = (screenW + xTileNum - 1) / xTileNum; int height = (screenH + yTileNum - 1) / yTileNum; int blockDim = 256; int gridDim = (width * height + blockDim - 1) / blockDim; for(int i = 0; i < xTileNum; i++) for(int j = 0; j < yTileNum; j++) { hipLaunchKernelGGL(( traceScene), dim3(gridDim), dim3(blockDim), 0, 0, i * width, j * height, width, height); } hipError_t err = hipPeekAtLastError(); hipDeviceSynchronize(); hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::startRayTracingPT() { int xTileNum = TILE_DIM; int yTileNum = TILE_DIM; int width = (screenW + xTileNum - 1) / xTileNum; int height = (screenH + yTileNum - 1) / yTileNum; int blockDim = BLOCK_DIM; int gridDim = 256; unsigned long long zero = 0; for(int i = 0; i < xTileNum; i++) for(int j = 0; j < yTileNum; j++) { int tmp_width = min(screenW - i * width, width); int tmp_height = min(screenH - j * height, height); hipLaunchKernelGGL(( traceScenePT), dim3(gridDim), dim3(blockDim), 0, 0, i * width, j * height, tmp_width, tmp_height); hipMemcpyToSymbol(globalPoolNextRay, &zero, sizeof(unsigned long long)); hipDeviceSynchronize(); } hipError_t err = hipPeekAtLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::init() { hipDeviceReset(); loadCamera(); loadPrimitives(); loadLights(); #ifdef PARALLEL_BUILD_BVH buildBVH(); #else loadBVH(); #endif createFrameBuffer(); loadParameters(); hipDeviceSetLimit(hipLimitStackSize, 1024 * 24); //printInfo<<<1, 1>>>(); //hipDeviceSynchronize(); } void CUDAPathTracer::createFrameBuffer() { hipError_t err = hipSuccess; screenH = pathTracer->frameBuffer.h; screenW = pathTracer->frameBuffer.w; err = hipMalloc((void**)&frameBuffer, 3 * screenW * screenH * sizeof(float)); hipMemset(frameBuffer, 0, 3 * screenW * screenH * sizeof(float)); if (err != hipSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::loadCamera() { //printf("load camera\n"); //printf("camera: %p\n", pathTracer->camera); GPUCamera tmpCam; Camera* cam = pathTracer->camera; tmpCam.widthDivDist = cam->screenW / cam->screenDist; tmpCam.heightDivDist = cam->screenH / cam->screenDist; //printf("after loading camera\n"); for (int i = 0; i < 9; i++) { tmpCam.c2w[i] = cam->c2w(i / 3, i % 3); } for (int i = 0; i < 3; i++) { tmpCam.pos[i] = cam->pos[i]; } hipError_t err = hipSuccess; //hipMalloc((void**)&gpu_camera,sizeof(GPUCamera)); err = hipMemcpyToSymbol(const_camera, &tmpCam,sizeof(GPUCamera)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::loadPrimitives() { vector<Primitive *>& primitives = pathTracer->primitives; int N = primitives.size(); int types[N]; int bsdfs[N]; float *positions = new float[9 * N]; float *normals = new float[9 * N]; float4* woopPositions = new float4[3 * N]; primNum = N; map<BSDF*, int> BSDFMap; for (int i = 0; i < N; i++) { primMap[primitives[i]] = i; types[i] = primitives[i]->getType(); BSDF* bsdf = primitives[i]->get_bsdf(); if (BSDFMap.find(bsdf) == BSDFMap.end()) { int index = BSDFMap.size(); BSDFMap[bsdf] = index; bsdfs[i] = index; } else{ bsdfs[i] = BSDFMap[bsdf]; } if (types[i] == 0) { Vector3D o = ((Sphere*)primitives[i])->o; positions[9 * i] = o[0]; positions[9 * i + 1] = o[1]; positions[9 * i + 2] = o[2]; positions[9 * i + 3] = ((Sphere*)primitives[i])->r; } else{ const Mesh* mesh = ((Triangle*)primitives[i])->mesh; int v1 = ((Triangle*)primitives[i])->v1; int v2 = ((Triangle*)primitives[i])->v2; int v3 = ((Triangle*)primitives[i])->v3; positions[9 * i] = mesh->positions[v1][0]; positions[9 * i + 1] = mesh->positions[v1][1]; positions[9 * i + 2] = mesh->positions[v1][2]; normals[9 * i] = mesh->normals[v1][0]; normals[9 * i + 1] = mesh->normals[v1][1]; normals[9 * i + 2] = mesh->normals[v1][2]; positions[9 * i + 3] = mesh->positions[v2][0] - positions[9 * i]; positions[9 * i + 4] = mesh->positions[v2][1] - positions[9 * i + 1]; positions[9 * i + 5] = mesh->positions[v2][2] - positions[9 * i + 2]; normals[9 * i + 3] = mesh->normals[v2][0]; normals[9 * i + 4] = mesh->normals[v2][1]; normals[9 * i + 5] = mesh->normals[v2][2]; positions[9 * i + 6] = mesh->positions[v3][0] - positions[9 * i]; positions[9 * i + 7] = mesh->positions[v3][1] - positions[9 * i + 1]; positions[9 * i + 8] = mesh->positions[v3][2] - positions[9 * i + 2]; normals[9 * i + 6] = mesh->normals[v3][0]; normals[9 * i + 7] = mesh->normals[v3][1]; normals[9 * i + 8] = mesh->normals[v3][2]; Matrix4x4 mtx; Vector3D c0(positions[9 * i + 3], positions[9 * i + 4], positions[9 * i + 5]); Vector3D c1(positions[9 * i + 6], positions[9 * i + 7], positions[9 * i + 8]); Vector3D c2 = cross(c0, c1); Vector3D c3(positions[9 * i], positions[9 * i + 1], positions[9 * i + 2]); mtx[0] = Vector4D(c0); mtx[1] = Vector4D(c1); mtx[2] = Vector4D(c2); mtx[3] = Vector4D(c3, 1.0); mtx = mtx.inv(); woopPositions[3 * i] = make_float4(mtx(2,0), mtx(2,1), mtx(2,2), -mtx(2,3)); woopPositions[3 * i + 1] = make_float4(mtx(0,0), mtx(0,1), mtx(0,2), mtx(0,3)); woopPositions[3 * i + 2] = make_float4(mtx(1,0), mtx(1,1), mtx(1,2), mtx(1,3)); } } GPUBSDF BSDFArray[BSDFMap.size()]; for (auto itr = BSDFMap.begin(); itr != BSDFMap.end(); itr++) { GPUBSDF& gpu_bsdf = BSDFArray[itr->second]; BSDF* bsdf = itr->first; gpu_bsdf.type = bsdf->getType(); if 
(gpu_bsdf.type == 0) { Spectrum& albedo = ((DiffuseBSDF*)bsdf)->albedo; gpu_bsdf.albedo[0] = albedo.r; gpu_bsdf.albedo[1] = albedo.g; gpu_bsdf.albedo[2] = albedo.b; } else if(gpu_bsdf.type == 1){ Spectrum& reflectance = ((MirrorBSDF*)bsdf)->reflectance; gpu_bsdf.reflectance[0] = reflectance.r; gpu_bsdf.reflectance[1] = reflectance.g; gpu_bsdf.reflectance[2] = reflectance.b; } else if(gpu_bsdf.type == 2){ Spectrum& transmittance = ((RefractionBSDF*)bsdf)->transmittance; gpu_bsdf.transmittance[0] = transmittance.r; gpu_bsdf.transmittance[1] = transmittance.g; gpu_bsdf.transmittance[2] = transmittance.b; gpu_bsdf.ior = ((RefractionBSDF*)bsdf)->ior; } else if(gpu_bsdf.type == 3){ Spectrum& reflectance = ((GlassBSDF*)bsdf)->reflectance; gpu_bsdf.reflectance[0] = reflectance.r; gpu_bsdf.reflectance[1] = reflectance.g; gpu_bsdf.reflectance[2] = reflectance.b; Spectrum& transmittance = ((GlassBSDF*)bsdf)->transmittance; gpu_bsdf.transmittance[0] = transmittance.r; gpu_bsdf.transmittance[1] = transmittance.g; gpu_bsdf.transmittance[2] = transmittance.b; gpu_bsdf.ior = ((GlassBSDF*)bsdf)->ior; } else if(gpu_bsdf.type == 4){ Spectrum& albedo = ((EmissionBSDF*)bsdf)->radiance; gpu_bsdf.albedo[0] = albedo.r; gpu_bsdf.albedo[1] = albedo.g; gpu_bsdf.albedo[2] = albedo.b; } } hipMalloc((void**)&gpu_types, N * sizeof(int)); hipMalloc((void**)&gpu_bsdfIndexes, N * sizeof(int)); hipMalloc((void**)&gpu_positions, 9 * N * sizeof(float)); hipMalloc((void**)&gpu_normals, 9 * N * sizeof(float)); hipMalloc((void**)&gpu_woopPositions, 3 * N * sizeof(float4)); hipMemcpy(gpu_types, types, N * sizeof(int),hipMemcpyHostToDevice); hipMemcpy(gpu_bsdfIndexes, bsdfs, N * sizeof(int),hipMemcpyHostToDevice); hipMemcpy(gpu_positions, positions, 9 * N * sizeof(float),hipMemcpyHostToDevice); hipMemcpy(gpu_normals, normals, 9 * N * sizeof(float),hipMemcpyHostToDevice); hipMemcpy(gpu_woopPositions, woopPositions, 3 * N * sizeof(float4), hipMemcpyHostToDevice); //hipMalloc((void**)&gpu_bsdfs, BSDFMap.size() * sizeof(GPUBSDF)); delete [] positions; delete [] normals; delete [] woopPositions; hipError_t err = hipSuccess; err = hipMemcpyToSymbol(const_bsdfs, BSDFArray, BSDFMap.size() * sizeof(GPUBSDF)); if (err != hipSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::convertBBox(BBox& bbox, GPUBBox& gpu_bbox) { gpu_bbox.min[0] = bbox.min[0]; gpu_bbox.min[1] = bbox.min[1]; gpu_bbox.min[2] = bbox.min[2]; gpu_bbox.max[0] = bbox.max[0]; gpu_bbox.max[1] = bbox.max[1]; gpu_bbox.max[2] = bbox.max[2]; } void CUDAPathTracer::freeBVHNode(GPUBVHNode* node) { GPUBVHNode gpu_node; hipMemcpy(&gpu_node, node, sizeof(GPUBVHNode), hipMemcpyDeviceToHost); hipFree(node); if(gpu_node.left) freeBVHNode(gpu_node.left); if(gpu_node.right) freeBVHNode(gpu_node.right); } GPUBVHNode* CUDAPathTracer::generateBVHNode(BVHNode* node) { GPUBVHNode gpu_node; gpu_node.start = node->start; gpu_node.range = node->range; //printf("(%d, %d)", (int)node->start, (int)node->range); convertBBox(node->bb, gpu_node.bbox); if (node->l) gpu_node.left = generateBVHNode(node->l); else gpu_node.left = NULL; if (node->r) gpu_node.right = generateBVHNode(node->r); else gpu_node.right = NULL; GPUBVHNode* device_node; hipMalloc((void**)&device_node, sizeof(GPUBVHNode)); hipMemcpy(device_node, &gpu_node, sizeof(GPUBVHNode), hipMemcpyHostToDevice); return device_node; } void CUDAPathTracer::loadBVH() { vector<Primitive*> &primitives = pathTracer->bvh->primitives; int N = primitives.size(); int tmpMap[N]; for(int i = 0; i < (int)primitives.size(); i++) { tmpMap[i] = primMap[primitives[i]]; //printf("%d ", tmpMap[i]); } //cout << endl; hipMalloc((void**)&BVHPrimMap, N * sizeof(int)); hipMemcpy(BVHPrimMap, tmpMap, N * sizeof(int), hipMemcpyHostToDevice); BVHRoot = generateBVHNode(pathTracer->bvh->root); // cout << endl; // cout << "=========================" << endl; } void CUDAPathTracer::buildBVH() { printf("build bvh\n"); vector<Primitive*> &primitives = pathTracer->primitives; // can be parallelized? 
BBox sceneBox; for (size_t i = 0; i < pathTracer->primitives.size(); i++) { sceneBox.expand(pathTracer->primitives[i]->get_bbox()); } Vector3D sceneMin = sceneBox.min; Vector3D sceneExtent = sceneBox.extent; int numObjects = primitives.size(); printf("hipMalloc\n"); hipError_t err = hipSuccess; err = hipMalloc((void**)&gpu_leafNodes, numObjects * sizeof(GPUBVHNode)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate gpu_leafNodes (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void**)&gpu_internalNodes, (numObjects - 1) * sizeof(GPUBVHNode)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate gpu_internalNodes (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void**)&gpu_sortedMortonCodes, numObjects * sizeof(unsigned int)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate gpu_sortedMortonCodes (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void**)&BVHPrimMap, numObjects * sizeof(int)); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate BVHPrimMap (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } BVHParameters tmpParams; tmpParams.numObjects = numObjects; tmpParams.leafNodes = gpu_leafNodes; tmpParams.internalNodes = gpu_internalNodes; tmpParams.sortedMortonCodes = gpu_sortedMortonCodes; tmpParams.sortedObjectIDs = BVHPrimMap; tmpParams.types = gpu_types; tmpParams.positions = gpu_positions; for (int i = 0; i < 3; ++i) { tmpParams.sceneMin[i] = sceneMin[i]; tmpParams.sceneExtent[i] = sceneExtent[i]; } printf("memcpyToSymbol\n"); err = hipMemcpyToSymbol(const_bvhparams, &tmpParams, sizeof(BVHParameters)); if (err != hipSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipEvent_t begin, stop; hipEventCreate(&begin); hipEventCreate(&stop); int threadsPerBlock = 256; int numBlocks = (numObjects + threadsPerBlock - 1) / threadsPerBlock; printf("computeMorton\n"); // assign morton code to each primitive float totalms = 0; hipEventRecord(begin); hipLaunchKernelGGL(( computeMorton), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = hipPeekAtLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // sort primitive according to morton code //wrap raw pointer with a device_ptr to use with Thrust functions // unsigned int* keys = thrust::raw_pointer_cast(const_bvhparams.sortedMortonCodes); // int* data = thrust::raw_pointer_cast(const_bvhparams.sortedObjectIDs); printf("thrustSort\n"); hipEventRecord(begin); thrust::device_ptr<unsigned int> keys = thrust::device_pointer_cast(gpu_sortedMortonCodes); thrust::device_ptr<int> data = thrust::device_pointer_cast(BVHPrimMap); thrust::sort_by_key(keys, keys + numObjects, data); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = hipPeekAtLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("generateLeaf\n"); // generate leaf nodes hipEventRecord(begin); hipLaunchKernelGGL(( generateLeafNode), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = hipPeekAtLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("generateInternal\n"); // generate internal nodes hipEventRecord(begin); numBlocks = (numObjects - 1 + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( generateInternalNode), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = hipPeekAtLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // printf("Print Morton Codes\n"); // printMorton<<<1, 1>>>(); // hipDeviceSynchronize(); // hipDeviceSynchronize(); // printf("Leaves\n"); // printLeaf<<<1, 1>>>(); // hipDeviceSynchronize(); // hipDeviceSynchronize(); // printf("Internals\n"); // printInternal<<<1, 1>>>(); // hipDeviceSynchronize(); // hipDeviceSynchronize(); printf("buildBoundingBox\n"); // build bouding box hipEventRecord(begin); numBlocks = (numObjects + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( buildBoundingBox), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = hipPeekAtLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // printTREE<<<1, 1>>>(); // hipDeviceSynchronize(); // hipDeviceSynchronize(); printf("tree collapse\n"); hipEventRecord(begin); numBlocks = (numObjects - 1 + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( treeCollapse), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, ); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; hipFree(gpu_sortedMortonCodes); BVHRoot = gpu_internalNodes; printf("build BVH done\n"); printf("Total build BVH time:%f\n", totalms / 1000); } // Load light void CUDAPathTracer::toGPULight(SceneLight* l, GPULight *gpuLight) { gpuLight->type = l->getType(); switch(l->getType()) { case 0: // DirectionalLight { DirectionalLight* light = (DirectionalLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; gpuLight->dirToLight[i] = light->dirToLight[i]; } } break; case 1: // InfiniteHemisphereLight { InfiniteHemisphereLight* light = (InfiniteHemisphereLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; for (int j = 0; j < 3; j++) { gpuLight->sampleToWorld[3 * i + j] = light->sampleToWorld(i, j); } } } break; case 2: // PointLight { PointLight* light = (PointLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; gpuLight->position[i] = light->position[i]; } } break; case 3: // AreaLight { AreaLight* light = (AreaLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; gpuLight->position[i] = light->position[i]; gpuLight->direction[i] = light->direction[i]; gpuLight->dim_x[i] = light->dim_x[i]; gpuLight->dim_y[i] = light->dim_y[i]; gpuLight->area = light->area; } } break; default: break; } } void CUDAPathTracer::loadLights() { int tmpLightNum = pathTracer->scene->lights.size(); GPULight tmpLights[tmpLightNum]; for (int i = 0; i < tmpLightNum; ++i) { //displayLight(pathTracer->scene->lights[i]); toGPULight(pathTracer->scene->lights[i], tmpLights + i); } //hipMalloc((void**)&gpu_lights, sizeof(GPULight) * tmpLightNum); hipError_t err = hipSuccess; err = hipMemcpyToSymbol(const_lights, tmpLights, sizeof(GPULight) * tmpLightNum); if (err != hipSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // GPULight rtLights[tmpLightNum]; // hipMemcpy(rtLights, gpu_lights, sizeof(GPULight) * tmpLightNum, hipMemcpyDeviceToHost); // //printf("==================\n"); // for (int i = 0; i < tmpLightNum; ++i) // { // displayGPULight(rtLights + i); // } } // load Parameters void CUDAPathTracer::loadParameters() { Parameters tmpParams; tmpParams.screenW = pathTracer->frameBuffer.w; tmpParams.screenH = pathTracer->frameBuffer.h; tmpParams.max_ray_depth = pathTracer->max_ray_depth; tmpParams.ns_aa = pathTracer->ns_aa; tmpParams.ns_area_light = pathTracer->ns_area_light; tmpParams.lightNum = pathTracer->scene->lights.size(); tmpParams.types = gpu_types; tmpParams.bsdfIndexes = gpu_bsdfIndexes; tmpParams.positions = gpu_positions; tmpParams.normals = gpu_normals; tmpParams.primNum = primNum; tmpParams.frameBuffer = frameBuffer; tmpParams.BVHPrimMap = BVHPrimMap; tmpParams.BVHRoot = BVHRoot; tmpParams.woopPositions = gpu_woopPositions; hipMalloc((void**)gpu_camOffset, sizeof(float3)); tmpParams.camOffset = gpu_camOffset; cout << "primNum:" << primNum << endl; hipError_t err = hipSuccess; err = hipMemcpyToSymbol(const_params, &tmpParams, sizeof(Parameters)); if (err != hipSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //Parameters rtParms; //hipMemcpy(&rtParms, parms, sizeof(Parameters), hipMemcpyDeviceToHost); //printf("screenW: %d, screenH: %d, max_ray_depth: %d, ns_aa: %d, ns_area_light: %d, lightNum: %d\n", rtParms.screenW, rtParms.screenH, rtParms.max_ray_depth, rtParms.ns_aa, rtParms.ns_area_light, rtParms.lightNum); } void CUDAPathTracer::updateHostSampleBuffer() { float* gpuBuffer = (float*) malloc(sizeof(float) * (3 * screenW * screenH)); hipError_t err = hipSuccess; err = hipMemcpy(gpuBuffer, frameBuffer, sizeof(float) * (3 * screenW * screenH), hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } pathTracer->updateBufferFromGPU(gpuBuffer); free(gpuBuffer); } void PathTracer::updateBufferFromGPU(float* gpuBuffer) { size_t w = sampleBuffer.w; size_t h = sampleBuffer.h; for (int x = 0; x < w; ++x) { for (int y = 0; y < h; ++y) { int index = 3 * (y * w + x); Spectrum s(gpuBuffer[index], gpuBuffer[index + 1], gpuBuffer[index + 2]); //cout << s.r << "," << s.g << "," << s.b << endl; sampleBuffer.update_pixel(s, x, y); } } sampleBuffer.toColor(frameBuffer, 0, 0, w, h); }
e19460b6755c2cba04d0b53e323ee463647b3829.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> #include <iostream> #include <time.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <curand_kernel.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include "setup.h" struct Parameters { int screenW; int screenH; int max_ray_depth; ///< maximum allowed ray depth (applies to all rays) int ns_aa; ///< number of camera rays in one pixel (along one axis) int ns_area_light; ///< number samples per area light source int lightNum; int primNum; int* types; int* bsdfIndexes; float* positions; float* normals; float4* woopPositions; float3* camOffset; float* frameBuffer; int* BVHPrimMap; GPUBVHNode* BVHRoot; }; struct BVHParameters { float sceneMin[3]; float sceneExtent[3]; int numObjects; GPUBVHNode *leafNodes; GPUBVHNode *internalNodes; unsigned int*sortedMortonCodes; int *sortedObjectIDs; int *types; float *positions; }; //#define PARALLEL_BUILD_BVH #define TILE_DIM 1 #include "kernel.cu" #include <map> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ //using namespace std; float3* gpu_camOffset; float4* gpu_woopPositions; CUDAPathTracer::CUDAPathTracer(PathTracer* _pathTracer) { pathTracer = _pathTracer; } CUDAPathTracer::~CUDAPathTracer() { cudaFree(gpu_types); cudaFree(gpu_bsdfIndexes); cudaFree(gpu_positions); cudaFree(gpu_normals); cudaFree(gpu_woopPositions); cudaFree(frameBuffer); cudaFree(BVHPrimMap); #ifdef PARALLEL_BUILD_BVH // cudaFree(gpu_sortedMortonCodes); free at the end of setup cudaFree(gpu_leafNodes); cudaFree(gpu_internalNodes); #else freeBVHNode(BVHRoot); #endif } void CUDAPathTracer::startRayTracing() { int xTileNum = TILE_DIM; int yTileNum = TILE_DIM; int width = (screenW + xTileNum - 1) / xTileNum; int height = (screenH + yTileNum - 1) / yTileNum; int blockDim = 256; int gridDim = (width * height + blockDim - 1) / blockDim; for(int i = 0; i < xTileNum; i++) for(int j = 0; j < yTileNum; j++) { traceScene<<<gridDim, blockDim>>>(i * width, j * height, width, height); } cudaError_t err = cudaPeekAtLastError(); cudaDeviceSynchronize(); cudaThreadSynchronize(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::startRayTracingPT() { int xTileNum = TILE_DIM; int yTileNum = TILE_DIM; int width = (screenW + xTileNum - 1) / xTileNum; int height = (screenH + yTileNum - 1) / yTileNum; int blockDim = BLOCK_DIM; int gridDim = 256; unsigned long long zero = 0; for(int i = 0; i < xTileNum; i++) for(int j = 0; j < yTileNum; j++) { int tmp_width = min(screenW - i * width, width); int tmp_height = min(screenH - j * height, height); traceScenePT<<<gridDim, blockDim>>>(i * width, j * height, tmp_width, tmp_height); cudaMemcpyToSymbol(globalPoolNextRay, &zero, sizeof(unsigned long 
long)); cudaThreadSynchronize(); } cudaError_t err = cudaPeekAtLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::init() { cudaDeviceReset(); loadCamera(); loadPrimitives(); loadLights(); #ifdef PARALLEL_BUILD_BVH buildBVH(); #else loadBVH(); #endif createFrameBuffer(); loadParameters(); cudaDeviceSetLimit(cudaLimitStackSize, 1024 * 24); //printInfo<<<1, 1>>>(); //cudaDeviceSynchronize(); } void CUDAPathTracer::createFrameBuffer() { cudaError_t err = cudaSuccess; screenH = pathTracer->frameBuffer.h; screenW = pathTracer->frameBuffer.w; err = cudaMalloc((void**)&frameBuffer, 3 * screenW * screenH * sizeof(float)); cudaMemset(frameBuffer, 0, 3 * screenW * screenH * sizeof(float)); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::loadCamera() { //printf("load camera\n"); //printf("camera: %p\n", pathTracer->camera); GPUCamera tmpCam; Camera* cam = pathTracer->camera; tmpCam.widthDivDist = cam->screenW / cam->screenDist; tmpCam.heightDivDist = cam->screenH / cam->screenDist; //printf("after loading camera\n"); for (int i = 0; i < 9; i++) { tmpCam.c2w[i] = cam->c2w(i / 3, i % 3); } for (int i = 0; i < 3; i++) { tmpCam.pos[i] = cam->pos[i]; } cudaError_t err = cudaSuccess; //cudaMalloc((void**)&gpu_camera,sizeof(GPUCamera)); err = cudaMemcpyToSymbol(const_camera, &tmpCam,sizeof(GPUCamera)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::loadPrimitives() { vector<Primitive *>& primitives = pathTracer->primitives; int N = primitives.size(); int types[N]; int bsdfs[N]; float *positions = new float[9 * N]; float *normals = new float[9 * N]; float4* woopPositions = new float4[3 * N]; primNum = N; map<BSDF*, int> BSDFMap; for (int i = 0; i < N; i++) { primMap[primitives[i]] = i; types[i] = primitives[i]->getType(); BSDF* bsdf = primitives[i]->get_bsdf(); if (BSDFMap.find(bsdf) == BSDFMap.end()) { int index = BSDFMap.size(); BSDFMap[bsdf] = index; bsdfs[i] = index; } else{ bsdfs[i] = BSDFMap[bsdf]; } if (types[i] == 0) { Vector3D o = ((Sphere*)primitives[i])->o; positions[9 * i] = o[0]; positions[9 * i + 1] = o[1]; positions[9 * i + 2] = o[2]; positions[9 * i + 3] = ((Sphere*)primitives[i])->r; } else{ const Mesh* mesh = ((Triangle*)primitives[i])->mesh; int v1 = ((Triangle*)primitives[i])->v1; int v2 = ((Triangle*)primitives[i])->v2; int v3 = ((Triangle*)primitives[i])->v3; positions[9 * i] = mesh->positions[v1][0]; positions[9 * i + 1] = mesh->positions[v1][1]; positions[9 * i + 2] = mesh->positions[v1][2]; normals[9 * i] = mesh->normals[v1][0]; normals[9 * i + 1] = mesh->normals[v1][1]; normals[9 * i + 2] = mesh->normals[v1][2]; positions[9 * i + 3] = mesh->positions[v2][0] - positions[9 * i]; positions[9 * i + 4] = mesh->positions[v2][1] - positions[9 * i + 1]; positions[9 * i + 5] = mesh->positions[v2][2] - positions[9 * i + 2]; normals[9 * i + 3] = mesh->normals[v2][0]; normals[9 * i + 4] = mesh->normals[v2][1]; normals[9 * i + 5] = mesh->normals[v2][2]; positions[9 * i + 6] = mesh->positions[v3][0] - positions[9 * i]; positions[9 * i + 7] = mesh->positions[v3][1] - positions[9 * i + 1]; positions[9 * i + 8] = mesh->positions[v3][2] - positions[9 * i + 2]; normals[9 * i + 6] = mesh->normals[v3][0]; normals[9 * i + 7] = mesh->normals[v3][1]; normals[9 * i 
+ 8] = mesh->normals[v3][2]; Matrix4x4 mtx; Vector3D c0(positions[9 * i + 3], positions[9 * i + 4], positions[9 * i + 5]); Vector3D c1(positions[9 * i + 6], positions[9 * i + 7], positions[9 * i + 8]); Vector3D c2 = cross(c0, c1); Vector3D c3(positions[9 * i], positions[9 * i + 1], positions[9 * i + 2]); mtx[0] = Vector4D(c0); mtx[1] = Vector4D(c1); mtx[2] = Vector4D(c2); mtx[3] = Vector4D(c3, 1.0); mtx = mtx.inv(); woopPositions[3 * i] = make_float4(mtx(2,0), mtx(2,1), mtx(2,2), -mtx(2,3)); woopPositions[3 * i + 1] = make_float4(mtx(0,0), mtx(0,1), mtx(0,2), mtx(0,3)); woopPositions[3 * i + 2] = make_float4(mtx(1,0), mtx(1,1), mtx(1,2), mtx(1,3)); } } GPUBSDF BSDFArray[BSDFMap.size()]; for (auto itr = BSDFMap.begin(); itr != BSDFMap.end(); itr++) { GPUBSDF& gpu_bsdf = BSDFArray[itr->second]; BSDF* bsdf = itr->first; gpu_bsdf.type = bsdf->getType(); if (gpu_bsdf.type == 0) { Spectrum& albedo = ((DiffuseBSDF*)bsdf)->albedo; gpu_bsdf.albedo[0] = albedo.r; gpu_bsdf.albedo[1] = albedo.g; gpu_bsdf.albedo[2] = albedo.b; } else if(gpu_bsdf.type == 1){ Spectrum& reflectance = ((MirrorBSDF*)bsdf)->reflectance; gpu_bsdf.reflectance[0] = reflectance.r; gpu_bsdf.reflectance[1] = reflectance.g; gpu_bsdf.reflectance[2] = reflectance.b; } else if(gpu_bsdf.type == 2){ Spectrum& transmittance = ((RefractionBSDF*)bsdf)->transmittance; gpu_bsdf.transmittance[0] = transmittance.r; gpu_bsdf.transmittance[1] = transmittance.g; gpu_bsdf.transmittance[2] = transmittance.b; gpu_bsdf.ior = ((RefractionBSDF*)bsdf)->ior; } else if(gpu_bsdf.type == 3){ Spectrum& reflectance = ((GlassBSDF*)bsdf)->reflectance; gpu_bsdf.reflectance[0] = reflectance.r; gpu_bsdf.reflectance[1] = reflectance.g; gpu_bsdf.reflectance[2] = reflectance.b; Spectrum& transmittance = ((GlassBSDF*)bsdf)->transmittance; gpu_bsdf.transmittance[0] = transmittance.r; gpu_bsdf.transmittance[1] = transmittance.g; gpu_bsdf.transmittance[2] = transmittance.b; gpu_bsdf.ior = ((GlassBSDF*)bsdf)->ior; } else if(gpu_bsdf.type == 4){ Spectrum& albedo = ((EmissionBSDF*)bsdf)->radiance; gpu_bsdf.albedo[0] = albedo.r; gpu_bsdf.albedo[1] = albedo.g; gpu_bsdf.albedo[2] = albedo.b; } } cudaMalloc((void**)&gpu_types, N * sizeof(int)); cudaMalloc((void**)&gpu_bsdfIndexes, N * sizeof(int)); cudaMalloc((void**)&gpu_positions, 9 * N * sizeof(float)); cudaMalloc((void**)&gpu_normals, 9 * N * sizeof(float)); cudaMalloc((void**)&gpu_woopPositions, 3 * N * sizeof(float4)); cudaMemcpy(gpu_types, types, N * sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(gpu_bsdfIndexes, bsdfs, N * sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(gpu_positions, positions, 9 * N * sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(gpu_normals, normals, 9 * N * sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(gpu_woopPositions, woopPositions, 3 * N * sizeof(float4), cudaMemcpyHostToDevice); //cudaMalloc((void**)&gpu_bsdfs, BSDFMap.size() * sizeof(GPUBSDF)); delete [] positions; delete [] normals; delete [] woopPositions; cudaError_t err = cudaSuccess; err = cudaMemcpyToSymbol(const_bsdfs, BSDFArray, BSDFMap.size() * sizeof(GPUBSDF)); if (err != cudaSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void CUDAPathTracer::convertBBox(BBox& bbox, GPUBBox& gpu_bbox) { gpu_bbox.min[0] = bbox.min[0]; gpu_bbox.min[1] = bbox.min[1]; gpu_bbox.min[2] = bbox.min[2]; gpu_bbox.max[0] = bbox.max[0]; gpu_bbox.max[1] = bbox.max[1]; gpu_bbox.max[2] = bbox.max[2]; } void CUDAPathTracer::freeBVHNode(GPUBVHNode* node) { GPUBVHNode gpu_node; cudaMemcpy(&gpu_node, node, sizeof(GPUBVHNode), cudaMemcpyDeviceToHost); cudaFree(node); if(gpu_node.left) freeBVHNode(gpu_node.left); if(gpu_node.right) freeBVHNode(gpu_node.right); } GPUBVHNode* CUDAPathTracer::generateBVHNode(BVHNode* node) { GPUBVHNode gpu_node; gpu_node.start = node->start; gpu_node.range = node->range; //printf("(%d, %d)", (int)node->start, (int)node->range); convertBBox(node->bb, gpu_node.bbox); if (node->l) gpu_node.left = generateBVHNode(node->l); else gpu_node.left = NULL; if (node->r) gpu_node.right = generateBVHNode(node->r); else gpu_node.right = NULL; GPUBVHNode* device_node; cudaMalloc((void**)&device_node, sizeof(GPUBVHNode)); cudaMemcpy(device_node, &gpu_node, sizeof(GPUBVHNode), cudaMemcpyHostToDevice); return device_node; } void CUDAPathTracer::loadBVH() { vector<Primitive*> &primitives = pathTracer->bvh->primitives; int N = primitives.size(); int tmpMap[N]; for(int i = 0; i < (int)primitives.size(); i++) { tmpMap[i] = primMap[primitives[i]]; //printf("%d ", tmpMap[i]); } //cout << endl; cudaMalloc((void**)&BVHPrimMap, N * sizeof(int)); cudaMemcpy(BVHPrimMap, tmpMap, N * sizeof(int), cudaMemcpyHostToDevice); BVHRoot = generateBVHNode(pathTracer->bvh->root); // cout << endl; // cout << "=========================" << endl; } void CUDAPathTracer::buildBVH() { printf("build bvh\n"); vector<Primitive*> &primitives = pathTracer->primitives; // can be parallelized? 
BBox sceneBox; for (size_t i = 0; i < pathTracer->primitives.size(); i++) { sceneBox.expand(pathTracer->primitives[i]->get_bbox()); } Vector3D sceneMin = sceneBox.min; Vector3D sceneExtent = sceneBox.extent; int numObjects = primitives.size(); printf("cudaMalloc\n"); cudaError_t err = cudaSuccess; err = cudaMalloc((void**)&gpu_leafNodes, numObjects * sizeof(GPUBVHNode)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate gpu_leafNodes (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMalloc((void**)&gpu_internalNodes, (numObjects - 1) * sizeof(GPUBVHNode)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate gpu_internalNodes (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMalloc((void**)&gpu_sortedMortonCodes, numObjects * sizeof(unsigned int)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate gpu_sortedMortonCodes (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMalloc((void**)&BVHPrimMap, numObjects * sizeof(int)); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate BVHPrimMap (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } BVHParameters tmpParams; tmpParams.numObjects = numObjects; tmpParams.leafNodes = gpu_leafNodes; tmpParams.internalNodes = gpu_internalNodes; tmpParams.sortedMortonCodes = gpu_sortedMortonCodes; tmpParams.sortedObjectIDs = BVHPrimMap; tmpParams.types = gpu_types; tmpParams.positions = gpu_positions; for (int i = 0; i < 3; ++i) { tmpParams.sceneMin[i] = sceneMin[i]; tmpParams.sceneExtent[i] = sceneExtent[i]; } printf("memcpyToSymbol\n"); err = cudaMemcpyToSymbol(const_bvhparams, &tmpParams, sizeof(BVHParameters)); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaEvent_t begin, stop; cudaEventCreate(&begin); cudaEventCreate(&stop); int threadsPerBlock = 256; int numBlocks = (numObjects + threadsPerBlock - 1) / threadsPerBlock; printf("computeMorton\n"); // assign morton code to each primitive float totalms = 0; cudaEventRecord(begin); computeMorton<<<numBlocks, threadsPerBlock>>>(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = cudaPeekAtLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // sort primitive according to morton code //wrap raw pointer with a device_ptr to use with Thrust functions // unsigned int* keys = thrust::raw_pointer_cast(const_bvhparams.sortedMortonCodes); // int* data = thrust::raw_pointer_cast(const_bvhparams.sortedObjectIDs); printf("thrustSort\n"); cudaEventRecord(begin); thrust::device_ptr<unsigned int> keys = thrust::device_pointer_cast(gpu_sortedMortonCodes); thrust::device_ptr<int> data = thrust::device_pointer_cast(BVHPrimMap); thrust::sort_by_key(keys, keys + numObjects, data); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = cudaPeekAtLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("generateLeaf\n"); // generate leaf nodes cudaEventRecord(begin); generateLeafNode<<<numBlocks, threadsPerBlock>>>(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = cudaPeekAtLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("generateInternal\n"); // generate internal nodes cudaEventRecord(begin); numBlocks = (numObjects - 1 + threadsPerBlock - 1) / threadsPerBlock; generateInternalNode<<<numBlocks, threadsPerBlock>>>(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = cudaPeekAtLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // printf("Print Morton Codes\n"); // printMorton<<<1, 1>>>(); // cudaThreadSynchronize(); // cudaDeviceSynchronize(); // printf("Leaves\n"); // printLeaf<<<1, 1>>>(); // cudaThreadSynchronize(); // cudaDeviceSynchronize(); // printf("Internals\n"); // printInternal<<<1, 1>>>(); // cudaThreadSynchronize(); // cudaDeviceSynchronize(); printf("buildBoundingBox\n"); // build bouding box cudaEventRecord(begin); numBlocks = (numObjects + threadsPerBlock - 1) / threadsPerBlock; buildBoundingBox<<<numBlocks, threadsPerBlock>>>(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; err = cudaPeekAtLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // printTREE<<<1, 1>>>(); // cudaThreadSynchronize(); // cudaDeviceSynchronize(); printf("tree collapse\n"); cudaEventRecord(begin); numBlocks = (numObjects - 1 + threadsPerBlock - 1) / threadsPerBlock; treeCollapse<<<numBlocks, threadsPerBlock>>>(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, begin, stop); printf("%f\n", milliseconds / 1000); totalms += milliseconds; cudaFree(gpu_sortedMortonCodes); BVHRoot = gpu_internalNodes; printf("build BVH done\n"); printf("Total build BVH time:%f\n", totalms / 1000); } // Load light void CUDAPathTracer::toGPULight(SceneLight* l, GPULight *gpuLight) { gpuLight->type = l->getType(); switch(l->getType()) { case 0: // DirectionalLight { DirectionalLight* light = (DirectionalLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; gpuLight->dirToLight[i] = light->dirToLight[i]; } } break; case 1: // InfiniteHemisphereLight { InfiniteHemisphereLight* light = (InfiniteHemisphereLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; for (int j = 0; j < 3; j++) { gpuLight->sampleToWorld[3 * i + j] = light->sampleToWorld(i, j); } } } break; case 2: // PointLight { PointLight* light = (PointLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; gpuLight->position[i] = light->position[i]; } } break; case 3: // AreaLight { AreaLight* light = (AreaLight*) l; for (int i = 0; i < 3; ++i) { gpuLight->radiance[i] = light->radiance[i]; gpuLight->position[i] = light->position[i]; gpuLight->direction[i] = light->direction[i]; gpuLight->dim_x[i] = light->dim_x[i]; gpuLight->dim_y[i] = light->dim_y[i]; gpuLight->area = light->area; } } break; default: break; } } void CUDAPathTracer::loadLights() { int tmpLightNum = pathTracer->scene->lights.size(); GPULight tmpLights[tmpLightNum]; for (int i = 0; i < tmpLightNum; ++i) { //displayLight(pathTracer->scene->lights[i]); toGPULight(pathTracer->scene->lights[i], tmpLights + i); } //cudaMalloc((void**)&gpu_lights, sizeof(GPULight) * tmpLightNum); cudaError_t err = cudaSuccess; err = cudaMemcpyToSymbol(const_lights, tmpLights, sizeof(GPULight) * tmpLightNum); if (err != cudaSuccess) { fprintf(stderr, "Failed! 
(error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // GPULight rtLights[tmpLightNum]; // cudaMemcpy(rtLights, gpu_lights, sizeof(GPULight) * tmpLightNum, cudaMemcpyDeviceToHost); // //printf("==================\n"); // for (int i = 0; i < tmpLightNum; ++i) // { // displayGPULight(rtLights + i); // } } // load Parameters void CUDAPathTracer::loadParameters() { Parameters tmpParams; tmpParams.screenW = pathTracer->frameBuffer.w; tmpParams.screenH = pathTracer->frameBuffer.h; tmpParams.max_ray_depth = pathTracer->max_ray_depth; tmpParams.ns_aa = pathTracer->ns_aa; tmpParams.ns_area_light = pathTracer->ns_area_light; tmpParams.lightNum = pathTracer->scene->lights.size(); tmpParams.types = gpu_types; tmpParams.bsdfIndexes = gpu_bsdfIndexes; tmpParams.positions = gpu_positions; tmpParams.normals = gpu_normals; tmpParams.primNum = primNum; tmpParams.frameBuffer = frameBuffer; tmpParams.BVHPrimMap = BVHPrimMap; tmpParams.BVHRoot = BVHRoot; tmpParams.woopPositions = gpu_woopPositions; cudaMalloc((void**)gpu_camOffset, sizeof(float3)); tmpParams.camOffset = gpu_camOffset; cout << "primNum:" << primNum << endl; cudaError_t err = cudaSuccess; err = cudaMemcpyToSymbol(const_params, &tmpParams, sizeof(Parameters)); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //Parameters rtParms; //cudaMemcpy(&rtParms, parms, sizeof(Parameters), cudaMemcpyDeviceToHost); //printf("screenW: %d, screenH: %d, max_ray_depth: %d, ns_aa: %d, ns_area_light: %d, lightNum: %d\n", rtParms.screenW, rtParms.screenH, rtParms.max_ray_depth, rtParms.ns_aa, rtParms.ns_area_light, rtParms.lightNum); } void CUDAPathTracer::updateHostSampleBuffer() { float* gpuBuffer = (float*) malloc(sizeof(float) * (3 * screenW * screenH)); cudaError_t err = cudaSuccess; err = cudaMemcpy(gpuBuffer, frameBuffer, sizeof(float) * (3 * screenW * screenH), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } pathTracer->updateBufferFromGPU(gpuBuffer); free(gpuBuffer); } void PathTracer::updateBufferFromGPU(float* gpuBuffer) { size_t w = sampleBuffer.w; size_t h = sampleBuffer.h; for (int x = 0; x < w; ++x) { for (int y = 0; y < h; ++y) { int index = 3 * (y * w + x); Spectrum s(gpuBuffer[index], gpuBuffer[index + 1], gpuBuffer[index + 2]); //cout << s.r << "," << s.g << "," << s.b << endl; sampleBuffer.update_pixel(s, x, y); } } sampleBuffer.toColor(frameBuffer, 0, 0, w, h); }
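The path-tracer pair above repeats the same manual error check (if (err != cudaSuccess) { fprintf(...); exit(...); }) after many runtime calls. A common way to factor that out is a checking macro; the sketch below is not part of the original files, only an illustration of the pattern under that assumption:

// Hypothetical helper (not in the original record): wraps the repeated error-check block.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                             \
              cudaGetErrorString(err_), __FILE__, __LINE__);                  \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)

// Example usage, mirroring the allocations above (note the address-of on the pointer):
//   CUDA_CHECK(cudaMalloc((void**)&frameBuffer, 3 * screenW * screenH * sizeof(float)));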
f46c87dccf966973e261d73353945778b1dc0ace.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "backprop.h" // cuda kernels #include "bpnn_layerforward.h" #include "bpnn_adjust_weights.h" double get_time() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } unsigned int num_threads = 0; unsigned int num_blocks = 0; //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { setup(argc, argv); } int bpnn_train_kernel(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; float *input_weights_one_dim; float *input_weights_prev_one_dim; float * partial_sum; float sum; unsigned int num_blocks = in / BLOCK_SIZE; input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float)); // this preprocessing stage is temporarily added to correct the bug of wrong memcopy using two-dimensional net->inputweights // todo: fix mem allocation int m = 0; for (int k = 0; k <= in; k++) { for (int j = 0; j <= hid; j++) { input_weights_one_dim[m] = net->input_weights[k][j]; input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j]; m++; } } printf("Performing GPU computation\n"); double offload_start = get_time(); float* d_input; float *d_input_weights; float *d_hidden_partial_sum; float *d_hidden_delta; float *d_input_prev_weights; hipMalloc((void**)&d_input, sizeof(float)*(in+1)); hipMalloc((void**)&d_input_weights, sizeof(float)*(in+1)*(hid+1)); hipMalloc((void**)&d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH); hipMemcpyAsync(d_input, net->input_units, sizeof(float)*(in+1), hipMemcpyHostToDevice, 0); hipMemcpyAsync(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), hipMemcpyHostToDevice, 0); dim3 grid(1, num_blocks); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( kernel_layerforward), dim3(grid), dim3(threads), 0, 0, d_input, d_input_weights, d_hidden_partial_sum, hid); hipMemcpy(partial_sum, d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH, hipMemcpyDeviceToHost); for (int j = 1; j <= hid; j++) { sum = 0.f; for (unsigned int k = 0; k < num_blocks; k++) { sum += partial_sum[k * hid + j-1] ; } #ifdef DEBUG printf("j=%d sum=%f\n", j,sum); #endif sum += net->input_weights[0][j]; net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum))); } bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); // input_weights has been written in the first kernel, so it needs to be restored. 
hipMemcpyAsync(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), hipMemcpyHostToDevice, 0); hipMalloc((void**)&d_hidden_delta, sizeof(float)*(hid+1)); hipMalloc((void**)&d_input_prev_weights, sizeof(float)*(in+1)*(hid+1)); hipMemcpyAsync(d_hidden_delta, net->hidden_delta, sizeof(float)*(hid+1), hipMemcpyHostToDevice, 0); hipMemcpyAsync(d_input_prev_weights, input_weights_prev_one_dim, sizeof(float)*(in+1)*(hid+1), hipMemcpyHostToDevice, 0); hipLaunchKernelGGL(( kernel_adjust_weights), dim3(grid), dim3(threads), 0, 0, d_input, d_input_weights, d_hidden_delta, d_input_prev_weights, hid); hipMemcpy(input_weights_one_dim, d_input_weights, sizeof(float)*(in+1)*(hid+1), hipMemcpyDeviceToHost); double offload_end = get_time(); printf("Device offloading time = %lf(s)\n", offload_end - offload_start); hipFree(d_input); hipFree(d_input_weights); hipFree(d_hidden_partial_sum); hipFree(d_hidden_delta); hipFree(d_input_prev_weights); #ifdef OUTPUT for (int i = 0; i < (in+1); i++) printf("i=%d input_units=%f\n", i,net->input_units[i]); for (int i = 0; i < (in+1)*(hid+1); i++) printf("i=%d input_weights=%f\n", i,input_weights_one_dim[i]); #endif free(input_weights_prev_one_dim); free(partial_sum); free(input_weights_one_dim); return 0; }
f46c87dccf966973e261d73353945778b1dc0ace.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <cuda.h> #include "backprop.h" // cuda kernels #include "bpnn_layerforward.h" #include "bpnn_adjust_weights.h" double get_time() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } unsigned int num_threads = 0; unsigned int num_blocks = 0; //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { setup(argc, argv); } int bpnn_train_kernel(BPNN *net, float *eo, float *eh) { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; float *input_weights_one_dim; float *input_weights_prev_one_dim; float * partial_sum; float sum; unsigned int num_blocks = in / BLOCK_SIZE; input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float)); partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float)); // this preprocessing stage is temporarily added to correct the bug of wrong memcopy using two-dimensional net->inputweights // todo: fix mem allocation int m = 0; for (int k = 0; k <= in; k++) { for (int j = 0; j <= hid; j++) { input_weights_one_dim[m] = net->input_weights[k][j]; input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j]; m++; } } printf("Performing GPU computation\n"); double offload_start = get_time(); float* d_input; float *d_input_weights; float *d_hidden_partial_sum; float *d_hidden_delta; float *d_input_prev_weights; cudaMalloc((void**)&d_input, sizeof(float)*(in+1)); cudaMalloc((void**)&d_input_weights, sizeof(float)*(in+1)*(hid+1)); cudaMalloc((void**)&d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH); cudaMemcpyAsync(d_input, net->input_units, sizeof(float)*(in+1), cudaMemcpyHostToDevice, 0); cudaMemcpyAsync(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), cudaMemcpyHostToDevice, 0); dim3 grid(1, num_blocks); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); kernel_layerforward<<<grid, threads>>>(d_input, d_input_weights, d_hidden_partial_sum, hid); cudaMemcpy(partial_sum, d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH, cudaMemcpyDeviceToHost); for (int j = 1; j <= hid; j++) { sum = 0.f; for (unsigned int k = 0; k < num_blocks; k++) { sum += partial_sum[k * hid + j-1] ; } #ifdef DEBUG printf("j=%d sum=%f\n", j,sum); #endif sum += net->input_weights[0][j]; net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum))); } bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); // input_weights has been written in the first kernel, so it needs to be restored. 
cudaMemcpyAsync(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), cudaMemcpyHostToDevice, 0); cudaMalloc((void**)&d_hidden_delta, sizeof(float)*(hid+1)); cudaMalloc((void**)&d_input_prev_weights, sizeof(float)*(in+1)*(hid+1)); cudaMemcpyAsync(d_hidden_delta, net->hidden_delta, sizeof(float)*(hid+1), cudaMemcpyHostToDevice, 0); cudaMemcpyAsync(d_input_prev_weights, input_weights_prev_one_dim, sizeof(float)*(in+1)*(hid+1), cudaMemcpyHostToDevice, 0); kernel_adjust_weights<<<grid, threads>>>(d_input, d_input_weights, d_hidden_delta, d_input_prev_weights, hid); cudaMemcpy(input_weights_one_dim, d_input_weights, sizeof(float)*(in+1)*(hid+1), cudaMemcpyDeviceToHost); double offload_end = get_time(); printf("Device offloading time = %lf(s)\n", offload_end - offload_start); cudaFree(d_input); cudaFree(d_input_weights); cudaFree(d_hidden_partial_sum); cudaFree(d_hidden_delta); cudaFree(d_input_prev_weights); #ifdef OUTPUT for (int i = 0; i < (in+1); i++) printf("i=%d input_units=%f\n", i,net->input_units[i]); for (int i = 0; i < (in+1)*(hid+1); i++) printf("i=%d input_weights=%f\n", i,input_weights_one_dim[i]); #endif free(input_weights_prev_one_dim); free(partial_sum); free(input_weights_one_dim); return 0; }
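The main difference between the .cu and .hip members of the backprop pair above (and of the other pairs in this section) is the runtime API prefix and the kernel-launch syntax. A minimal sketch of that mapping, using a toy kernel that is not taken from the files:

// Toy example (assumed kernel, not from the record) of the launch-syntax rewrite hipify applies.
#include <hip/hip_runtime.h>

__global__ void axpy(float a, const float* x, float* y, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}

void launch_axpy(float a, const float* x, float* y, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form: axpy<<<grid, block, 0, stream>>>(a, x, y, n);
  // HIP form emitted by hipify (shared-memory bytes and stream become explicit arguments):
  hipLaunchKernelGGL(axpy, grid, block, 0, stream, a, x, y, n);
}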
ec01c7baa398e8eb7fffbf1924da28b406f40709.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @author Mark Gates @generated from zlanhe.cu normal z -> c, Fri Jan 30 19:00:09 2015 */ #include "common_magma.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_c /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void clanhe_inf_kernel_generic_lower( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_C_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for(int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += 1; // sum column below diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void clanhe_inf_kernel_generic_upper( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_C_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for(int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += lda; //# // sum #row right of diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void clanhe_inf( magma_uplo_t uplo, int n, magmaFloatComplex_const_ptr A, int lda, magmaFloat_ptr dwork ) { int blocks = (n - 1)/inf_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(inf_bs, 4, 1); int n_full_block = (n - n % inf_bs) /inf_bs; int n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { hipLaunchKernelGGL(( clanhe_inf_kernel_generic_lower), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork, n_full_block, n_mod_bs ); } else { hipLaunchKernelGGL(( clanhe_inf_kernel_generic_upper), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork, n_full_block, n_mod_bs ); } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void clanhe_max_kernel_lower( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; for(int j=0; j < ind; ++j) { res = fmax( res, cuCabsf( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void clanhe_max_kernel_upper( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for(int j=n-1; j > ind; j--) { res = fmax( res, cuCabsf( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void clanhe_max( magma_uplo_t uplo, int n, magmaFloatComplex_const_ptr A, int lda, magmaFloat_ptr dwork ) { int blocks = (n - 1)/max_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(max_bs, 1, 1); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( clanhe_max_kernel_lower), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } else { hipLaunchKernelGGL(( clanhe_max_kernel_upper), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } } /* ====================================================================== */ /** Purpose ------- CLANHE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a complex Hermitian matrix A. CLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normI(A), NORM = 'I' or 'i' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Returns CLANHE < 0: if CLANHE = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm CHARACTER*1 Specifies the value to be returned in CLANHE as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the Hermitian matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, CLANHE is set to zero. @param[in] A COMPLEX array on the GPU, dimension (LDA,N) The Hermitian matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(N,1). @param dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires work. 
@ingroup magma_caux2 ********************************************************************/ extern "C" float magmablas_clanhe( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dwork ) { magma_int_t info = 0; magma_int_t arch = magma_getdevice_arch(); // 1-norm == inf-norm since A is Hermitian bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < n ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { clanhe_inf( uplo, n, dA, ldda, dwork ); } else { clanhe_max( uplo, n, dA, ldda, dwork ); } int i = magma_isamax( n, dwork, 1 ) - 1; hipMemcpy( &res, &dwork[i], sizeof(float), hipMemcpyDeviceToHost ); return res; }
ec01c7baa398e8eb7fffbf1924da28b406f40709.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @author Mark Gates @generated from zlanhe.cu normal z -> c, Fri Jan 30 19:00:09 2015 */ #include "common_magma.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_c /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void clanhe_inf_kernel_generic_lower( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_C_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for(int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += 1; // sum column below diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void clanhe_inf_kernel_generic_upper( int n, const magmaFloatComplex* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ magmaFloatComplex la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for(int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for(int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #if defined(PRECISION_z) || defined(PRECISION_c) else if ( i == tx ) { la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for(int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_C_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for(int j=ty*8; j < ty*8 + 8; j++) { res += cuCabsf( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for(int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for(int j=0; j < 8; j++) { res += cuCabsf( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for(int j=0; j < tx; j++) { res += cuCabsf( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_C_REAL( *A )); A += lda; //# // sum #row right of diagonal for(int j=tx+1; j < n_mod_bs; j++) { res += cuCabsf( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_C_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_C_REAL( la[tx][1] ) + MAGMA_C_REAL( la[tx][2] ) + MAGMA_C_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void clanhe_inf( magma_uplo_t uplo, int n, magmaFloatComplex_const_ptr A, int lda, magmaFloat_ptr dwork ) { int blocks = (n - 1)/inf_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(inf_bs, 4, 1); int n_full_block = (n - n % inf_bs) /inf_bs; int n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { clanhe_inf_kernel_generic_lower<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } else { clanhe_inf_kernel_generic_upper<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void clanhe_max_kernel_lower( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; for(int j=0; j < ind; ++j) { res = fmax( res, cuCabsf( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void clanhe_max_kernel_upper( int n, const magmaFloatComplex* A, int lda, float *dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for(int j=n-1; j > ind; j--) { res = fmax( res, cuCabsf( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = fmax( res, MAGMA_D_ABS( MAGMA_C_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void clanhe_max( magma_uplo_t uplo, int n, magmaFloatComplex_const_ptr A, int lda, magmaFloat_ptr dwork ) { int blocks = (n - 1)/max_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(max_bs, 1, 1); if ( uplo == MagmaLower ) { clanhe_max_kernel_lower<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } else { clanhe_max_kernel_upper<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } } /* ====================================================================== */ /** Purpose ------- CLANHE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a complex Hermitian matrix A. CLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normI(A), NORM = 'I' or 'i' ** supported only for (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Returns CLANHE < 0: if CLANHE = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm CHARACTER*1 Specifies the value to be returned in CLANHE as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the Hermitian matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, CLANHE is set to zero. @param[in] A COMPLEX array on the GPU, dimension (LDA,N) The Hermitian matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(N,1). @param dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires work. 
@ingroup magma_caux2 ********************************************************************/ extern "C" float magmablas_clanhe( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dwork ) { magma_int_t info = 0; magma_int_t arch = magma_getdevice_arch(); // 1-norm == inf-norm since A is Hermitian bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < n ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { clanhe_inf( uplo, n, dA, ldda, dwork ); } else { clanhe_max( uplo, n, dA, ldda, dwork ); } int i = magma_isamax( n, dwork, 1 ) - 1; cudaMemcpy( &res, &dwork[i], sizeof(float), cudaMemcpyDeviceToHost ); return res; }
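The two entries above are the hipified and original CUDA sources of MAGMA's clanhe, which builds per-row sums of absolute values in dwork and then takes their maximum for the inf-norm. As an illustration only (not part of MAGMA), the standalone CUDA C++ sketch below applies the same row-sum-then-max idea to a plain dense column-major matrix; the kernel name row_abs_sums, the matrix size, and the naive one-thread-per-row mapping are assumptions made for brevity, so error checks and the blocked shared-memory tiling of the real kernels are omitted.

#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime.h>

// One thread per row: dwork[i] = sum_j |A(i,j)| for a column-major n x n matrix.
// This is the dense, non-Hermitian analogue of the row sums the clanhe kernels build.
__global__ void row_abs_sums(int n, const float *A, int lda, float *dwork)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float s = 0.f;
        for (int j = 0; j < n; ++j)
            s += fabsf(A[i + (size_t)j * lda]);
        dwork[i] = s;
    }
}

int main()
{
    const int n = 512, lda = n;
    size_t bytes = (size_t)lda * n * sizeof(float);
    float *hA = (float *)malloc(bytes);
    for (size_t k = 0; k < (size_t)lda * n; ++k) hA[k] = (float)(k % 7) - 3.f;

    float *dA = NULL, *dwork = NULL;
    cudaMalloc((void **)&dA, bytes);
    cudaMalloc((void **)&dwork, n * sizeof(float));
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);

    row_abs_sums<<<(n + 127) / 128, 128>>>(n, dA, lda, dwork);

    // ||A||_inf = max over rows of the row sums; reduce on the host for clarity
    // (the MAGMA wrapper instead calls magma_isamax on the device-resident dwork).
    float *hwork = (float *)malloc(n * sizeof(float));
    cudaMemcpy(hwork, dwork, n * sizeof(float), cudaMemcpyDeviceToHost);
    float norm = 0.f;
    for (int i = 0; i < n; ++i) if (hwork[i] > norm) norm = hwork[i];
    printf("||A||_inf = %f\n", norm);

    cudaFree(dA); cudaFree(dwork); free(hA); free(hwork);
    return 0;
}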
smithwaterman_change_4_stage2_orignal_real_data.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; __shared__ int mismatch; __shared__ int match; __shared__ int open; __shared__ int extend; __shared__ short2 * direction_index; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; read_base_array=(char *) (data+num_add[offset].address_array); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); direction_index=(short2 *) (direction+offset*640*1100); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[130]; __shared__ int gap_h[130]; //insertion __shared__ short2 gap_size_h[130]; //insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; //__shared__ char cigar_m[128]; //__shared__ int cigar_int_m[128]; //int final_result; //int final_i; //int final_j; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; match=200; mismatch=-150; open=-260; extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; { char read_base; read_base=read_base_array[threadIdx.x]; int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; int gap_size_v=0; //Deletion int M=0; //now int step_right; //now int ki=0;//insertion h negetive //deletion v int MMM=0; short mt=0; short2 curmt; curmt.x=0; curmt.y=0; int current_reference_id=0; for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { int prev_gap=M-260; //M which is cacluated by last step in the same thread gap_v+=-11; if(prev_gap>gap_v) { gap_v=prev_gap; gap_size_v=1; } else gap_size_v++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 
200:-150); prev_gap=MM[threadIdx.x]-260; step_right=gap_h[threadIdx.x]-11; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=gap_size_v; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt=gap_size_h[threadIdx.x].y; direction_index[640*j+threadIdx.x]=curmt; //if(threadIdx.x==read_reference_number.x-3) //printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack); if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=threadIdx.x; // } //printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } if(threadIdx.x==read_reference_number.x-1) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. } } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==read_reference_number.x-1) { //printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; result4.y-=step_length; } if(state=='N') 
state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { int total_size=0; FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; double computation_time2=0;//total_time=0; timespec start,finish; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ int * direction; hipMalloc( (int **) & direction, 500 * (640*1100* sizeof (int))); fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); fscanf(file,"%s ",inputdata[i].read_base); } /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } */ //data preparation. 
clock_gettime(CLOCK_MONOTONIC_RAW,&start); char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); int new_len=(ref_len+4-1)/4; total_size+=ref_len*read_len; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); dim3 block(128); hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice); dim3 grid(size); // clock_gettime(CLOCK_MONOTONIC_RAW,&start); hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction); //result // hipDeviceSynchronize(); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // computation_time+=diff(start,finish); // clock_gettime(CLOCK_MONOTONIC_RAW,&start); hipLaunchKernelGGL(( calculate_cigar_2), dim3(grid),dim3(block), 0, 0, size,result_d,cigar,cigar_int,direction); //result // hipDeviceSynchronize(); hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost); hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost); hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // computation_time+=diff(start,finish); /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+2]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ //hipFree(direction); hipFree(data_d_total); hipFree(cigar); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); free(inputdata); free(cigar_h); free(cigar_int_h); free(data_h_total); computation_time2+=diff(start,finish); fscanf(file,"%d",&size); } hipFree(direction); // printf(" computation_time= %e total_time=%e \n",computation_time,0); printf(" computation_time= %e computation_time= %e %d GCUPs=%lf\n",computation_time,computation_time2,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
smithwaterman_change_4_stage2_orignal_real_data.cu
#include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; __shared__ int mismatch; __shared__ int match; __shared__ int open; __shared__ int extend; __shared__ short2 * direction_index; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; read_base_array=(char *) (data+num_add[offset].address_array); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); direction_index=(short2 *) (direction+offset*640*1100); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[130]; __shared__ int gap_h[130]; //insertion __shared__ short2 gap_size_h[130]; //insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; //__shared__ char cigar_m[128]; //__shared__ int cigar_int_m[128]; //int final_result; //int final_i; //int final_j; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; match=200; mismatch=-150; open=-260; extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; { char read_base; read_base=read_base_array[threadIdx.x]; int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; int gap_size_v=0; //Deletion int M=0; //now int step_right; //now int ki=0;//insertion h negetive //deletion v int MMM=0; short mt=0; short2 curmt; curmt.x=0; curmt.y=0; int current_reference_id=0; for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { int prev_gap=M-260; //M which is cacluated by last step in the same thread gap_v+=-11; if(prev_gap>gap_v) { gap_v=prev_gap; gap_size_v=1; } else gap_size_v++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 
200:-150); prev_gap=MM[threadIdx.x]-260; step_right=gap_h[threadIdx.x]-11; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=gap_size_v; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt=gap_size_h[threadIdx.x].y; direction_index[640*j+threadIdx.x]=curmt; //if(threadIdx.x==read_reference_number.x-3) //printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack); if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=threadIdx.x; // } //printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } if(threadIdx.x==read_reference_number.x-1) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. } } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==read_reference_number.x-1) { //printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; result4.y-=step_length; } if(state=='N') 
state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { int total_size=0; FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; double computation_time2=0;//total_time=0; timespec start,finish; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ int * direction; cudaMalloc( (int **) & direction, 500 * (640*1100* sizeof (int))); fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); fscanf(file,"%s ",inputdata[i].read_base); } /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } */ //data preparation. 
clock_gettime(CLOCK_MONOTONIC_RAW,&start); char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); int new_len=(ref_len+4-1)/4; total_size+=ref_len*read_len; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); dim3 block(128); cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice); dim3 grid(size); // clock_gettime(CLOCK_MONOTONIC_RAW,&start); calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction); //result // cudaDeviceSynchronize(); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // computation_time+=diff(start,finish); // clock_gettime(CLOCK_MONOTONIC_RAW,&start); calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result // cudaDeviceSynchronize(); cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost); cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost); cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost); // clock_gettime(CLOCK_MONOTONIC_RAW,&finish); // computation_time+=diff(start,finish); /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+2]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ //cudaFree(direction); cudaFree(data_d_total); cudaFree(cigar); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); free(inputdata); free(cigar_h); free(cigar_int_h); free(data_h_total); computation_time2+=diff(start,finish); fscanf(file,"%d",&size); } cudaFree(direction); // printf(" computation_time= %e total_time=%e \n",computation_time,0); printf(" computation_time= %e computation_time= %e %d GCUPs=%lf\n",computation_time,computation_time2,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
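This pair is an anti-diagonal (wavefront) Smith-Waterman-style aligner with affine gap scoring (match 200, mismatch -150, open -260, extend -11) plus a second kernel that backtracks the stored directions into a CIGAR. The host-side sketch below is only a hedged sanity check for tiny read/reference pairs: it keeps the same match/mismatch scores but collapses the affine open/extend pair into a single flat gap cost of -260 and skips the traceback entirely, so its scores differ from the kernels whenever a multi-base gap is optimal; the function name simple_local_score is made up.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Simplified local-alignment score: linear gap penalty, no affine open/extend,
// no backtrace. Useful only as a rough cross-check on very small inputs.
static int max4(int a, int b, int c, int d)
{
    int m = a;
    if (b > m) m = b;
    if (c > m) m = c;
    if (d > m) m = d;
    return m;
}

int simple_local_score(const char *read, const char *ref)
{
    const int match = 200, mismatch = -150, gap = -260;  // flat per-base gap cost (assumption)
    int rlen = (int)strlen(read), clen = (int)strlen(ref);
    int *prev = (int *)calloc(clen + 1, sizeof(int));
    int *curr = (int *)calloc(clen + 1, sizeof(int));
    int best = 0;
    for (int i = 1; i <= rlen; ++i) {
        for (int j = 1; j <= clen; ++j) {
            int diag = prev[j - 1] + (read[i - 1] == ref[j - 1] ? match : mismatch);
            curr[j] = max4(0, diag, prev[j] + gap, curr[j - 1] + gap);
            if (curr[j] > best) best = curr[j];
        }
        int *tmp = prev; prev = curr; curr = tmp;        // roll the two DP rows
        memset(curr, 0, (clen + 1) * sizeof(int));
    }
    free(prev); free(curr);
    return best;
}

int main(void)
{
    printf("%d\n", simple_local_score("ACGTACGT", "ACGTTACGT"));
    return 0;
}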
b5d3281296b792f02be91fa262d1ab7db0b9a6e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <accelerate_cuda.h> extern "C" __global__ void generate(const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0) { const int shapeSize = shOut_2 * (shOut_1 * shOut_0); const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 tmp_0 = ix; const Int64 tmp_1 = tmp_0 / shOut_0; const Int64 tmp_2 = tmp_1 / shOut_1; arrOut_0[ix] = 1.0; } }
b5d3281296b792f02be91fa262d1ab7db0b9a6e6.cu
#include <accelerate_cuda.h> extern "C" __global__ void generate(const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_0) { const int shapeSize = shOut_2 * (shOut_1 * shOut_0); const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 tmp_0 = ix; const Int64 tmp_1 = tmp_0 / shOut_0; const Int64 tmp_2 = tmp_1 / shOut_1; arrOut_0[ix] = 1.0; } }
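This Accelerate-generated pair fills a rank-3 array with 1.0 through a grid-stride loop (the unused tmp_* index splits are code-generator leftovers). The standalone sketch below shows the same launch-and-stride pattern with made-up names and a deliberately small, fixed grid, since the Accelerate runtime's actual grid-size choice is not visible here.

#include <cstdio>
#include <cuda_runtime.h>

// Grid-stride fill: correct for any shapeSize, even when the grid has fewer
// threads than elements, which is exactly why the generated kernel loops.
__global__ void fill_ones(long long shapeSize, double *out)
{
    long long stride = (long long)blockDim.x * gridDim.x;
    for (long long ix = blockIdx.x * blockDim.x + threadIdx.x; ix < shapeSize; ix += stride)
        out[ix] = 1.0;
}

int main()
{
    const long long n = 3LL * 4 * 5;   // e.g. shOut_2 * shOut_1 * shOut_0
    double *d = NULL;
    cudaMalloc((void **)&d, n * sizeof(double));
    fill_ones<<<2, 128>>>(n, d);       // deliberately small grid to exercise the stride loop
    double h[60];
    cudaMemcpy(h, d, n * sizeof(double), cudaMemcpyDeviceToHost);
    printf("first=%f last=%f\n", h[0], h[n - 1]);
    cudaFree(d);
    return 0;
}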
ff9a8b7e1bbbb10f009f140bc7ce68d22851f0d4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdint.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} int main( int argc, char *argv[] ) { int ITERATIONS = 1; //int numBytes = 131072; int numBytes = 131072*2; uint64_t *memory_to_access; //HANDLE_ERROR(hipHostMalloc(&memory_to_access,sizeof(uint64_t)*numBytes,0)); HANDLE_ERROR(hipMallocManaged(&memory_to_access,sizeof(uint64_t)*numBytes)); for(int k=0;k< numBytes ;k++) memory_to_access[k]=5; //printf("address = %p\n",memory_to_access); //printf("Press enter to continue...\n"); //getchar(); uint64_t fake=0; for(int i=0; i<ITERATIONS; i++) { for (int j = 0; j < (numBytes); j += 8) { fake += memory_to_access[j]; fake += memory_to_access[j + 1]; fake += memory_to_access[j + 2]; fake += memory_to_access[j + 3]; fake += memory_to_access[j + 4]; fake += memory_to_access[j + 5]; fake += memory_to_access[j + 6]; fake += memory_to_access[j + 7]; } } //printf("Press enter to continue...\n"); //getchar(); //hipHostFree(memory_to_access); hipFree(memory_to_access); return 0; }
ff9a8b7e1bbbb10f009f140bc7ce68d22851f0d4.cu
#include <stdint.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} int main( int argc, char *argv[] ) { int ITERATIONS = 1; //int numBytes = 131072; int numBytes = 131072*2; uint64_t *memory_to_access; //HANDLE_ERROR(cudaHostAlloc(&memory_to_access,sizeof(uint64_t)*numBytes,0)); HANDLE_ERROR(cudaMallocManaged(&memory_to_access,sizeof(uint64_t)*numBytes)); for(int k=0;k< numBytes ;k++) memory_to_access[k]=5; //printf("address = %p\n",memory_to_access); //printf("Press enter to continue...\n"); //getchar(); uint64_t fake=0; for(int i=0; i<ITERATIONS; i++) { for (int j = 0; j < (numBytes); j += 8) { fake += memory_to_access[j]; fake += memory_to_access[j + 1]; fake += memory_to_access[j + 2]; fake += memory_to_access[j + 3]; fake += memory_to_access[j + 4]; fake += memory_to_access[j + 5]; fake += memory_to_access[j + 6]; fake += memory_to_access[j + 7]; } } //printf("Press enter to continue...\n"); //getchar(); //cudaFreeHost(memory_to_access); cudaFree(memory_to_access); return 0; }
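The pair above touches a cudaMallocManaged buffer from the host only, so unified-memory pages migrate on demand as the loop walks them. As a hedged follow-on (not part of the original test), the sketch below prefetches the managed buffer to the current device with cudaMemPrefetchAsync before a trivial kernel reads it; the kernel name and the single-thread reduction are illustrative simplifications, and the prefetch is only a hint on hardware that supports it.

#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void sum_kernel(const uint64_t *data, int n, uint64_t *out)
{
    // Deliberately simple: thread 0 of block 0 sums everything sequentially.
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        uint64_t s = 0;
        for (int i = 0; i < n; ++i) s += data[i];
        *out = s;
    }
}

int main()
{
    const int n = 131072 * 2;
    uint64_t *data = NULL, *out = NULL;
    cudaMallocManaged(&data, n * sizeof(uint64_t));
    cudaMallocManaged(&out, sizeof(uint64_t));
    for (int i = 0; i < n; ++i) data[i] = 5;

    int dev = 0;
    cudaGetDevice(&dev);
    // Hint the driver to migrate the pages up front instead of faulting them in one by one.
    cudaMemPrefetchAsync(data, n * sizeof(uint64_t), dev, 0);

    sum_kernel<<<1, 1>>>(data, n, out);
    cudaDeviceSynchronize();
    printf("sum = %llu\n", (unsigned long long)*out);

    cudaFree(data);
    cudaFree(out);
    return 0;
}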
f8b5a05dfaeb0d824fe9476a596ef161fa200882.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include "datatypes.h" #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <vector> #include <shrUtils.h> #include <cutil_inline.h> using namespace std; /** sample function: sum up path from root to node */ __device__ void computation(TreeNode *parent, TreeNode *curr) { curr->value = curr->value + parent->value; } /** one step of the traversal */ __global__ void tt_step( int64_t lvl_start, int64_t lvl_end, int64_t nnodes_blk, TreeNode *slab ) { // range that this block will work on TreeNode *start = &slab[lvl_start + nnodes_blk * blockIdx.x]; TreeNode *end = &start[nnodes_blk]; if (end > &slab[lvl_end]) { end = &slab[lvl_end]; } // loop over all of the nodes for (TreeNode *n = start + threadIdx.x; n < end; n += blockDim.x) { computation(&slab[n->parent_idx], n); } } TreeNode *Tree::copy_to_gpu() { TreeNode *res = NULL; cutilSafeCall(hipMalloc((void**) &res, nbytes())); cutilSafeCall(hipMemcpy(res, slab, nbytes(), hipMemcpyHostToDevice)); return res; } /** make sure to sync before this! */ Tree Tree::copy_from_gpu(TreeNode *gpu_ptr) { Tree res(tree_sz); res.levels = levels; cutilSafeCall( hipMemcpy( res.slab, gpu_ptr, nbytes(), hipMemcpyDeviceToHost)); return res; } int64_t div_ceil(int64_t x, int64_t y) { return (x / y) + ((x % y != 0) ? 1 : 0); } /** NOTE: a bit of copied code */ int main(int argc, char** argv) { Tree t = generate_tree(20); t.print(); // use command-line specified CUDA device, // otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); TreeNode *gpu_tree = t.copy_to_gpu(); cutilSafeCall( hipDeviceSynchronize() ); shrLog("*** Running test on %d bytes of input\n", t.nbytes()); Tree::LevelIter it = t.levels->begin(); ++it; // skip the root for (; it != t.levels->end(); it++) { TreeLevel lvl = *it; int64_t nthreads_blk = 2; // NOTE: can make each thread process more than one node int64_t nnodes_blk = nthreads_blk; int64_t nblocks = div_ceil(lvl.nnodes, nnodes_blk); cout << "running with " << nblocks << " blocks of " << nthreads_blk << " threads each" << endl; hipLaunchKernelGGL(( tt_step), dim3(nblocks), dim3(nthreads_blk), 0, 0, lvl.start, lvl.end(), nnodes_blk, gpu_tree); } cutilSafeCall( hipDeviceSynchronize() ); Tree t_res = t.copy_from_gpu(gpu_tree); cout << endl << endl << "=== After computation" << endl; t_res.print(); return 0; }
f8b5a05dfaeb0d824fe9476a596ef161fa200882.cu
// includes, system #include "datatypes.h" #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <vector> #include <shrUtils.h> #include <cutil_inline.h> using namespace std; /** sample function: sum up path from root to node */ __device__ void computation(TreeNode *parent, TreeNode *curr) { curr->value = curr->value + parent->value; } /** one step of the traversal */ __global__ void tt_step( int64_t lvl_start, int64_t lvl_end, int64_t nnodes_blk, TreeNode *slab ) { // range that this block will work on TreeNode *start = &slab[lvl_start + nnodes_blk * blockIdx.x]; TreeNode *end = &start[nnodes_blk]; if (end > &slab[lvl_end]) { end = &slab[lvl_end]; } // loop over all of the nodes for (TreeNode *n = start + threadIdx.x; n < end; n += blockDim.x) { computation(&slab[n->parent_idx], n); } } TreeNode *Tree::copy_to_gpu() { TreeNode *res = NULL; cutilSafeCall(cudaMalloc((void**) &res, nbytes())); cutilSafeCall(cudaMemcpy(res, slab, nbytes(), cudaMemcpyHostToDevice)); return res; } /** make sure to sync before this! */ Tree Tree::copy_from_gpu(TreeNode *gpu_ptr) { Tree res(tree_sz); res.levels = levels; cutilSafeCall( cudaMemcpy( res.slab, gpu_ptr, nbytes(), cudaMemcpyDeviceToHost)); return res; } int64_t div_ceil(int64_t x, int64_t y) { return (x / y) + ((x % y != 0) ? 1 : 0); } /** NOTE: a bit of copied code */ int main(int argc, char** argv) { Tree t = generate_tree(20); t.print(); // use command-line specified CUDA device, // otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); TreeNode *gpu_tree = t.copy_to_gpu(); cutilSafeCall( cudaThreadSynchronize() ); shrLog("*** Running test on %d bytes of input\n", t.nbytes()); Tree::LevelIter it = t.levels->begin(); ++it; // skip the root for (; it != t.levels->end(); it++) { TreeLevel lvl = *it; int64_t nthreads_blk = 2; // NOTE: can make each thread process more than one node int64_t nnodes_blk = nthreads_blk; int64_t nblocks = div_ceil(lvl.nnodes, nnodes_blk); cout << "running with " << nblocks << " blocks of " << nthreads_blk << " threads each" << endl; tt_step<<<nblocks, nthreads_blk>>>(lvl.start, lvl.end(), nnodes_blk, gpu_tree); } cutilSafeCall( cudaThreadSynchronize() ); Tree t_res = t.copy_from_gpu(gpu_tree); cout << endl << endl << "=== After computation" << endl; t_res.print(); return 0; }
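
The traversal above is level-synchronous: the host launches one kernel per tree level and synchronizes before moving on, so every parent's value is final before its children read it, and `div_ceil` sizes the grid for each level. Since `Tree`, `TreeLevel`, and `generate_tree` live in `datatypes.h` (not shown in this entry), the sketch below substitutes hypothetical [start, end) level ranges over a flat array purely to illustrate the launch/synchronize loop and the grid-sizing arithmetic.

#include <cstdio>
#include <utility>
#include <vector>
#include <cuda_runtime.h>

// Per-level launch pattern: one kernel launch per tree level, with a
// synchronization barrier in between, so parents finish before children run.
__global__ void touch_level(long long start, long long end, int* vals)
{
    long long i = start + blockIdx.x * (long long)blockDim.x + threadIdx.x;
    if (i < end) vals[i] += 1;  // stand-in for computation(parent, node)
}

static long long div_ceil(long long x, long long y) { return (x + y - 1) / y; }

int main()
{
    const long long n = 1 << 12;
    int* d_vals = NULL;
    cudaMalloc((void**)&d_vals, n * sizeof(int));
    cudaMemset(d_vals, 0, n * sizeof(int));

    // Hypothetical level boundaries over a flat node array: root, then two wider levels.
    std::vector<std::pair<long long, long long> > levels;
    levels.push_back(std::make_pair(0LL, 1LL));
    levels.push_back(std::make_pair(1LL, 9LL));
    levels.push_back(std::make_pair(9LL, n));

    const int block = 256;
    for (size_t l = 0; l < levels.size(); ++l) {
        long long count = levels[l].second - levels[l].first;
        touch_level<<<(unsigned)div_ceil(count, block), block>>>(levels[l].first, levels[l].second, d_vals);
        cudaDeviceSynchronize();  // barrier between levels
    }
    printf("processed %zu levels\n", levels.size());
    cudaFree(d_vals);
    return 0;
}
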
1f3d34c6f0157439b0eaab0e883c64d76d83e9ad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "head.h"
#define tpb 256

extern double *d_t;
extern double *d_it;
extern double *d_V;
extern double *d_dV2;
extern double *d_Vnew;
extern double *d_m;
extern double *d_h;
extern double *d_jj;
extern double *d_d;
extern double *d_f;
extern double *d_X;
extern double *d_cai;

__global__ void comp_dV2(double *d_V ,double *d_dV2 ){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k<nx*ny){
        int j = (int)(k/nx);
        int id = k+(nx+2)+1+(2*j);// what index is this?
        d_dV2[k] =D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) +(d_V[id+nx+2] + d_V[id-nx-2]-2*d_V[id])/(dy*dy));
    }
}

void gpu_dV2(){
    int bpg;
    //tpb = 256;
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2);
    hipDeviceSynchronize();
}

__global__ void comp_dV2it(double *d_it ,double *d_dV2 ){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k<nx*ny){
        //int j = (int)(k/nx);
        //int id = k+(nx+2)+1+(2*j);// what index is this?
        d_dV2[k] =-d_it[k];
    }
}

void gpu_dV2it(){
    int bpg;
    //tpb = 256;
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( comp_dV2it), dim3(bpg), dim3(tpb), 0, 0, d_it, d_dV2);
    hipDeviceSynchronize();
}

__global__ void plane_waves(double *d_dV2){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k<ny*5){
        int i, j;
        i = (int)(k/nx);
        j = k-i*nx;
        d_dV2[j*ny+i] = d_dV2[j*ny+i] + (-st);
    }
}

void stimu(){
    int bpg;
    //int tpb;
    //tpb = 256;
    bpg = (ny*5+tpb-1)/tpb; // since only 5 columns are stimulated, threads for 5 columns are enough
    hipLaunchKernelGGL(( plane_waves), dim3(bpg), dim3(tpb), 0, 0, d_dV2);
    hipDeviceSynchronize();
}

__global__ void Euler(double *d_V, double *d_dV2, double *d_Vnew, double *d_t){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k<nx*ny){
        int j = (int)(k/nx);
        d_Vnew[k] = d_V[k+nx+2+1+2*j] + dt*d_dV2[k];
        d_V[k+nx+2+1+2*j] = d_Vnew[k];
    }
    if(k==0){
        d_t[0] = d_t[0] + dt;
    }
}

void Forward_Euler(){
    int bpg;
    //int tpb;
    //tpb = 256;
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( Euler), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew, d_t);
    hipDeviceSynchronize();
}

__global__ void Euler2(double *d_V, double *d_dV2, double *d_Vnew, double *d_t){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k<nx*ny){
        int j = (int)(k/nx);
        d_Vnew[k] = d_V[k+nx+2+1+2*j] + (dt/2)*d_dV2[k];
        d_V[k+nx+2+1+2*j] = d_Vnew[k];
    }
    if(k==0){
        d_t[0] = d_t[0] + dt/2;
    }
}

void Forward_Euler2(){
    int bpg;
    //int tpb;
    //tpb = 256;
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( Euler2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew, d_t);
    hipDeviceSynchronize();
}

__global__ void boundary(double *d_V){
    int k = blockDim.x * blockIdx.x + threadIdx.x;// this is the global index
    if(k<nx){
        d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1];// how do these indices map? this is the no-flux boundary on the padded (nx+2)*(ny+2) grid
        d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx];
        d_V[k+1] = d_V[k+1+(nx+2)];
        d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1];
    }
}

void gpu_Boun(){
    int bpg;
    //tpb = 256;
    bpg = (nx+tpb-1)/tpb; // the boundary condition only needs one line of threads, computing the four statements
    hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V);
    hipDeviceSynchronize();
}

void gpu_step123(int ncount,int stimtime){
    int bpg;
    //tpb = 256;
    //---1---
    bpg = (nx+tpb-1)/tpb; // the boundary condition only needs one line of threads, computing the four statements
    hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V);
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2);
    hipLaunchKernelGGL(( Euler2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew, d_t);
    //---2---
    gpu_Ion();
    hipLaunchKernelGGL(( comp_dV2it), dim3(bpg), dim3(tpb), 0, 0, d_it, d_dV2);
    if (ncount >= 1 && ncount <= stimtime) {
        bpg = (ny*5+tpb-1)/tpb; // since only 5 columns are stimulated, threads for 5 columns are enough
        hipLaunchKernelGGL(( plane_waves), dim3(bpg), dim3(tpb), 0, 0, d_dV2);
    }
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( Euler), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew, d_t);
    //---3---
    bpg = (nx+tpb-1)/tpb; // the boundary condition only needs one line of threads, computing the four statements
    hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V);
    bpg = (nx*ny+tpb-1)/tpb;
    hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2);
    hipLaunchKernelGGL(( Euler2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew, d_t);
    hipDeviceSynchronize();
}
1f3d34c6f0157439b0eaab0e883c64d76d83e9ad.cu
#include "head.h" #define tpb 256 extern double *d_t; extern double *d_it; extern double *d_V; extern double *d_dV2; extern double *d_Vnew; extern double *d_m; extern double *d_h; extern double *d_jj; extern double *d_d; extern double *d_f; extern double *d_X; extern double *d_cai; __global__ void comp_dV2(double *d_V ,double *d_dV2 ){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); int id = k+(nx+2)+1+(2*j);//这是什么index? d_dV2[k] =D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) +(d_V[id+nx+2] + d_V[id-nx-2]-2*d_V[id])/(dy*dy)); } } void gpu_dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); cudaDeviceSynchronize(); } __global__ void comp_dV2it(double *d_it ,double *d_dV2 ){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ //int j = (int)(k/nx); //int id = k+(nx+2)+1+(2*j);//这是什么index? d_dV2[k] =-d_it[k]; } } void gpu_dV2it(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_dV2it<<<bpg, tpb>>>(d_it, d_dV2); cudaDeviceSynchronize(); } __global__ void plane_waves(double *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j; i = (int)(k/nx); j = k-i*nx; d_dV2[j*ny+i] = d_dV2[j*ny+i] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; // 因为刺激5列,所以开5列线程就够了 plane_waves<<<bpg, tpb>>>(d_dV2); cudaDeviceSynchronize(); } __global__ void Euler(double *d_V, double *d_dV2, double *d_Vnew, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*j] + dt*d_dV2[k]; d_V[k+nx+2+1+2*j] = d_Vnew[k]; } if(k==0){ d_t[0] = d_t[0] + dt; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; Euler<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew, d_t); cudaDeviceSynchronize(); } __global__ void Euler2(double *d_V, double *d_dV2, double *d_Vnew, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int j = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*j] + (dt/2)*d_dV2[k]; d_V[k+nx+2+1+2*j] = d_Vnew[k]; } if(k==0){ d_t[0] = d_t[0] + dt/2; } } void Forward_Euler2(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; Euler2<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew, d_t); cudaDeviceSynchronize(); } __global__ void boundary(double *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x;//这是global index if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1];//这些index是怎么对应?是no flux边界,这是扩充后的情况,(nx+2)*(ny+2) d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void gpu_Boun(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; // 边界条件只需要1列线程,算四条语句 boundary<<<bpg, tpb>>>(d_V); cudaDeviceSynchronize(); } void gpu_step123(int ncount,int stimtime){ int bpg; //tpb = 256; //---1--- bpg = (nx+tpb-1)/tpb; // 边界条件只需要1列线程,算四条语句 boundary<<<bpg, tpb>>>(d_V); bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); Euler2<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew, d_t); //---2--- gpu_Ion(); comp_dV2it<<<bpg, tpb>>>(d_it, d_dV2); if (ncount >= 1 && ncount <= stimtime) { bpg = (ny*5+tpb-1)/tpb; // 因为刺激5列,所以开5列线程就够了 plane_waves<<<bpg, tpb>>>(d_dV2); } bpg = (nx*ny+tpb-1)/tpb; Euler<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew, d_t); //---3--- bpg = (nx+tpb-1)/tpb; // 边界条件只需要1列线程,算四条语句 boundary<<<bpg, tpb>>>(d_V); bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); Euler2<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew, d_t); cudaDeviceSynchronize(); }
33cddf20352d91f0e4f6323889a500d5cef6a58e.hip
// !!! This is a file automatically generated by hipify!!! /* This version assigns one thread per 16 bytes of text.(one text block) Stores the plaintext/ciphertext in registers. Stores the encryption keys in shared memory. Stores the S-boxes in shared memory. The blocksize is 512. */ #include <iostream> #include <fstream> #include <sstream> #include <chrono> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> typedef unsigned char uint8; enum workMode { ENCRYPTION, DECRYPTION }; //Key generation constants uint8 C1[] = { 0x51,0x7c,0xc1,0xb7,0x27,0x22,0x0a,0x94,0xfe,0x13,0xab,0xe8,0xfa,0x9a,0x6e,0xe0 }; uint8 C2[] = { 0x6d,0xb1,0x4a,0xcc,0x9e,0x21,0xc8,0x20,0xff,0x28,0xb1,0xd5,0xef,0x5d,0xe2,0xb0 }; uint8 C3[] = { 0xdb,0x92,0x37,0x1d,0x21,0x26,0xe9,0x70,0x03,0x24,0x97,0x75,0x04,0xe8,0xc9,0x0e }; //Encryption round keys uint8 ek[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //Decyription round keys uint8 dk[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //S-boxes static const uint8 SB1[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; static const uint8 SB2[256] = { 0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1, 0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1, 0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB, 0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB, 0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD, 0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53, 0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1, 0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40, 0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC, 0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 
0x25, 0x92, 0xE5, 0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43, 0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8, 0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA, 0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C, 0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D, 0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81 }; static const uint8 SB3[256] = { 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D }; static const uint8 SB4[256] = { 0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C, 0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D, 0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D, 0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED, 0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B, 0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE, 0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9, 0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41, 0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A, 0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7, 0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC, 0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5, 0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45, 0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D, 0xF2, 0xB1, 
0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3, 0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60 }; uint8 hex2dec(char ch) { if (ch >= '0' && ch <= '9') return ch - '0'; else return ch - 'a' + 10; } uint8 leftRotate(uint8 n, uint8 d) { return (n << d) | (n >> (8 - d)); } uint8 rightRotate(uint8 n, uint8 d) { return (n >> d) | (n << (8 - d)); } uint8* RightShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*) malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[arrSize - amount + i]; } for (int i = arrSize - 1; i >= amount; i--) { newArr[i] = arr[i - amount]; } for (int i = 0; i < amount; i++) { newArr[i] = tmp[i]; } free(tmp); return newArr; } uint8* LeftShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*)malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[i]; } for (int i = 0; i < arrSize - amount; i++) { newArr[i] = arr[i + amount]; } for (int i = 0; i < amount; i++) { newArr[arrSize - amount + i] = tmp[i]; } free(tmp); return newArr; } uint8* ShiftArrR(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = RightShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[arrSize - 1] & (0xff >> (8 - amount));//bits that are shifted to byte on right for (int i = 0; i < arrSize; i++) { carryTmp = arr[i] & (0xff >> (8 - amount));//calculate carry for byte on right arr[i] >>= amount;//right shift the current byte. arr[i] |= rightRotate(carry, amount);//place the bits from coming from byte on left carry = carryTmp; } return arr; } uint8* ShiftArrL(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = LeftShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[0] & (0xff << (8 - amount));//bits that are shifted to byte on left for (int i = arrSize - 1; i >= 0; i--) { carryTmp = arr[i] & (0xff << (8 - amount));//calculate carry for byte on left arr[i] <<= amount;//left shift the current byte. 
arr[i] |= leftRotate(carry, amount);//place the bits from coming from byte on right carry = carryTmp; } return arr; } void XOR_16(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } } void XOR_16wFree(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } free(y); } //Substition Layer 1 void SL1(uint8* in, uint8* out) { out[0] = SB1[in[0]]; out[1] = SB2[in[1]]; out[2] = SB3[in[2]]; out[3] = SB4[in[3]]; out[4] = SB1[in[4]]; out[5] = SB2[in[5]]; out[6] = SB3[in[6]]; out[7] = SB4[in[7]]; out[8] = SB1[in[8]]; out[9] = SB2[in[9]]; out[10] = SB3[in[10]]; out[11] = SB4[in[11]]; out[12] = SB1[in[12]]; out[13] = SB2[in[13]]; out[14] = SB3[in[14]]; out[15] = SB4[in[15]]; } //Substition Layer 2(Inverse of SL1) void SL2(uint8* in, uint8* out) { out[0] = SB3[in[0]]; out[1] = SB4[in[1]]; out[2] = SB1[in[2]]; out[3] = SB2[in[3]]; out[4] = SB3[in[4]]; out[5] = SB4[in[5]]; out[6] = SB1[in[6]]; out[7] = SB2[in[7]]; out[8] = SB3[in[8]]; out[9] = SB4[in[9]]; out[10] = SB1[in[10]]; out[11] = SB2[in[11]]; out[12] = SB3[in[12]]; out[13] = SB4[in[13]]; out[14] = SB1[in[14]]; out[15] = SB2[in[15]]; } //Diffusion layer void A(uint8* in, uint8* out) { out[0] = in[3] ^ in[4] ^ in[6] ^ in[8] ^ in[9] ^ in[13] ^ in[14]; out[1] = in[2] ^ in[5] ^ in[7] ^ in[8] ^ in[9] ^ in[12] ^ in[15]; out[2] = in[1] ^ in[4] ^ in[6] ^ in[10] ^ in[11] ^ in[12] ^ in[15]; out[3] = in[0] ^ in[5] ^ in[7] ^ in[10] ^ in[11] ^ in[13] ^ in[14]; out[4] = in[0] ^ in[2] ^ in[5] ^ in[8] ^ in[11] ^ in[14] ^ in[15]; out[5] = in[1] ^ in[3] ^ in[4] ^ in[9] ^ in[10] ^ in[14] ^ in[15]; out[6] = in[0] ^ in[2] ^ in[7] ^ in[9] ^ in[10] ^ in[12] ^ in[13]; out[7] = in[1] ^ in[3] ^ in[6] ^ in[8] ^ in[11] ^ in[12] ^ in[13]; out[8] = in[0] ^ in[1] ^ in[4] ^ in[7] ^ in[10] ^ in[13] ^ in[15]; out[9] = in[0] ^ in[1] ^ in[5] ^ in[6] ^ in[11] ^ in[12] ^ in[14]; out[10] = in[2] ^ in[3] ^ in[5] ^ in[6] ^ in[8] ^ in[13] ^ in[15]; out[11] = in[2] ^ in[3] ^ in[4] ^ in[7] ^ in[9] ^ in[12] ^ in[14]; out[12] = in[1] ^ in[2] ^ in[6] ^ in[7] ^ in[9] ^ in[11] ^ in[12]; out[13] = in[0] ^ in[3] ^ in[6] ^ in[7] ^ in[8] ^ in[10] ^ in[13]; out[14] = in[0] ^ in[3] ^ in[4] ^ in[5] ^ in[9] ^ in[11] ^ in[14]; out[15] = in[1] ^ in[2] ^ in[4] ^ in[5] ^ in[8] ^ in[10] ^ in[15]; } /*Round Functions(F0,FE) takes 16 bytes of plaintext and generates an intermediate val of 16bytes */ //Odd Round Function void F0(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL1(res1, res2); A(res2, out); } //Even Round Function void FE(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL2(res1, res2); A(res2, out); } void GenerateRoundKeys(uint8* W0, uint8* W1, uint8* W2, uint8* W3) { //Producing encryption round keys //Producing encryption round keys can be parallelized. //However since we do this once for all blocks, it is faster to compute in CPU. //ShiftArr functions return array from heap, must free. 
XOR_16wFree(W0, ShiftArrR(W1, 19), &ek[0]); XOR_16wFree(W1, ShiftArrR(W2, 19), &ek[16]); XOR_16wFree(W2, ShiftArrR(W3, 19), &ek[32]); XOR_16wFree(W3, ShiftArrR(W0, 19), &ek[48]); XOR_16wFree(W0, ShiftArrR(W1, 31), &ek[64]); XOR_16wFree(W1, ShiftArrR(W2, 31), &ek[80]); XOR_16wFree(W2, ShiftArrR(W3, 31), &ek[96]); XOR_16wFree(W3, ShiftArrR(W0, 31), &ek[112]); XOR_16wFree(W0, ShiftArrL(W1, 61), &ek[128]); XOR_16wFree(W1, ShiftArrL(W2, 61), &ek[144]); XOR_16wFree(W2, ShiftArrL(W3, 61), &ek[160]); XOR_16wFree(W3, ShiftArrL(W0, 61), &ek[176]); XOR_16wFree(W0, ShiftArrL(W1, 31), &ek[192]); XOR_16wFree(W1, ShiftArrL(W2, 31), &ek[208]); XOR_16wFree(W2, ShiftArrL(W3, 31), &ek[224]); XOR_16wFree(W3, ShiftArrL(W0, 31), &ek[240]); XOR_16wFree(W0, ShiftArrL(W1, 19), &ek[256]); } void GenerateDecRoundKeys(uint8 numOfRounds) { int N = numOfRounds - 1; int k = 1; for (int i = 0; i < 16; i++) { dk[i] = ek[16 * N + i]; } for (int i = N - 1; i >= 1; i--) { A(&ek[i * 16], &dk[k * 16]); k++; } for (int i = 0; i < 16; i++) { dk[k * 16 + i] = ek[i]; } } //Odd Round Function __device__ void F0_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL1) D[0] = SB1[D[0]]; D[1] = SB2[D[1]]; D[2] = SB3[D[2]]; D[3] = SB4[D[3]]; D[4] = SB1[D[4]]; D[5] = SB2[D[5]]; D[6] = SB3[D[6]]; D[7] = SB4[D[7]]; D[8] = SB1[D[8]]; D[9] = SB2[D[9]]; D[10] = SB3[D[10]]; D[11] = SB4[D[11]]; D[12] = SB1[D[12]]; D[13] = SB2[D[13]]; D[14] = SB3[D[14]]; D[15] = SB4[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } //Even Round Function __device__ void FE_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL2) D[0] = SB3[D[0]]; D[1] = SB4[D[1]]; D[2] = SB1[D[2]]; D[3] = SB2[D[3]]; D[4] = SB3[D[4]]; D[5] = SB4[D[5]]; D[6] = SB1[D[6]]; D[7] = SB2[D[7]]; D[8] = SB3[D[8]]; D[9] = SB4[D[9]]; D[10] = SB1[D[10]]; D[11] = SB2[D[11]]; D[12] = SB3[D[12]]; D[13] = SB4[D[13]]; D[14] = SB1[D[14]]; D[15] = SB2[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; 
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } template <unsigned int keySize> __global__ void Encrypt(uint8* plainText, unsigned long textSize, uint8* ek, uint8* SB_gmem) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; uint8 plainTextR[16];//registers keeping the plaintext. __shared__ uint8 keySmem[272];//each round key is 16 bytes, there are 17 round keys 272 bytes __shared__ uint8 SB1[256]; __shared__ uint8 SB2[256]; __shared__ uint8 SB3[256]; __shared__ uint8 SB4[256]; //Load encryption round keys to shared memory. if (tid < 272) { keySmem[tid] = ek[tid]; } //Load SB tables to shared memory. if (tid < 256) { SB1[tid] = SB_gmem[tid]; SB2[tid] = SB_gmem[tid + 256]; SB3[tid] = SB_gmem[tid + 512]; SB4[tid] = SB_gmem[tid + 768]; } //Load the plaintext to registers for (int i = 0; i < 16; i++) { plainTextR[i] = plainText[16 * idx + i]; } __syncthreads(); if (keySize == 16)//128-bit keys { F0_d(plainTextR, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(plainTextR, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[160], SB1, SB2, SB3, SB4);//...ek11 #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[176 + i];//ek12 } plainTextR[0] = SB3[plainTextR[0]]; plainTextR[1] = SB4[plainTextR[1]]; plainTextR[2] = SB1[plainTextR[2]]; plainTextR[3] = SB2[plainTextR[3]]; plainTextR[4] = SB3[plainTextR[4]]; plainTextR[5] = SB4[plainTextR[5]]; plainTextR[6] = SB1[plainTextR[6]]; plainTextR[7] = SB2[plainTextR[7]]; plainTextR[8] = SB3[plainTextR[8]]; plainTextR[9] = SB4[plainTextR[9]]; plainTextR[10] = SB1[plainTextR[10]]; plainTextR[11] = SB2[plainTextR[11]]; plainTextR[12] = SB3[plainTextR[12]]; plainTextR[13] = SB4[plainTextR[13]]; plainTextR[14] = SB1[plainTextR[14]]; plainTextR[15] = SB2[plainTextR[15]]; #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[192 + i];//ek13 } //Write back to global memory for (int i = 0; i < 16; i++) { plainText[16 * idx + i] = plainTextR[i]; } } else if (keySize == 24)//192-bit keys { F0_d(plainTextR, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... 
FE_d(plainTextR, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[192], SB1, SB2, SB3, SB4);//ek13 #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[208 + i];//ek14 } plainTextR[0] = SB3[plainTextR[0]]; plainTextR[1] = SB4[plainTextR[1]]; plainTextR[2] = SB1[plainTextR[2]]; plainTextR[3] = SB2[plainTextR[3]]; plainTextR[4] = SB3[plainTextR[4]]; plainTextR[5] = SB4[plainTextR[5]]; plainTextR[6] = SB1[plainTextR[6]]; plainTextR[7] = SB2[plainTextR[7]]; plainTextR[8] = SB3[plainTextR[8]]; plainTextR[9] = SB4[plainTextR[9]]; plainTextR[10] = SB1[plainTextR[10]]; plainTextR[11] = SB2[plainTextR[11]]; plainTextR[12] = SB3[plainTextR[12]]; plainTextR[13] = SB4[plainTextR[13]]; plainTextR[14] = SB1[plainTextR[14]]; plainTextR[15] = SB2[plainTextR[15]]; #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[224 + i];//ek15 } //Write back to global memory for (int i = 0; i < 16; i++) { plainText[16 * idx + i] = plainTextR[i]; } } else//256-bit keys { F0_d(plainTextR, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(plainTextR, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[192], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[208], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[224], SB1, SB2, SB3, SB4);//ek15 #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[240 + i];//ek16 } plainTextR[0] = SB3[plainTextR[0]]; plainTextR[1] = SB4[plainTextR[1]]; plainTextR[2] = SB1[plainTextR[2]]; plainTextR[3] = SB2[plainTextR[3]]; plainTextR[4] = SB3[plainTextR[4]]; plainTextR[5] = SB4[plainTextR[5]]; plainTextR[6] = SB1[plainTextR[6]]; plainTextR[7] = SB2[plainTextR[7]]; plainTextR[8] = SB3[plainTextR[8]]; plainTextR[9] = SB4[plainTextR[9]]; plainTextR[10] = SB1[plainTextR[10]]; plainTextR[11] = SB2[plainTextR[11]]; plainTextR[12] = SB3[plainTextR[12]]; plainTextR[13] = SB4[plainTextR[13]]; plainTextR[14] = SB1[plainTextR[14]]; plainTextR[15] = SB2[plainTextR[15]]; #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[256 + i];//ek17 } //Write back to global memory for (int i = 0; i < 16; i++) { plainText[16 * idx + i] = plainTextR[i]; } } } int main(void) { /////////INPUT PART BEGIN////////////////////// enum workMode workmode = ENCRYPTION; //Device pointers: uint8* deviceArr, *ek_d, *dk_d, *SB_dev; FILE *file; uint8* inputText;//either Plaintext or Ciphertext based on workmode; unsigned long int fileLen, textSize; uint8 
numOfRounds; const uint8 keySize = 32; uint8 key[32] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}; file = fopen("../input.txt", "r"); if (file) { char buf[2]; fseek(file, 0, SEEK_END); fileLen = ftell(file); fseek(file, 0, SEEK_SET); textSize = fileLen / 2; inputText = (uint8*)malloc(textSize); for (int i = 0; i < textSize; i++) { buf[0] = fgetc(file); buf[1] = fgetc(file); uint8 hexVal = (uint8)strtol(buf, NULL, 16); inputText[i] = hexVal; } } else { printf("File not found.\n"); return -1; } /////////INPUT PART END////////////////////// if (keySize == 16) numOfRounds = 13; else if (keySize == 24) numOfRounds = 15; else numOfRounds = 17; uint8 KL[16];//KL = leftmost 16 bytes of key uint8 KR[16];//KR = rightmost 16 bytes of key /* Most significant byte is stored in 0th index. KL = leftmost 16 bytes of key KR = rightmost 16 bytes of key */ for (int i = 0; i < 16; i++) { KL[i] = key[i]; } for (int i = 0; i < 16; i++) { KR[i] = key[i + 16]; } uint8* CK1, *CK2, *CK3; if (keySize == 16) { CK1 = C1; CK2 = C2; CK3 = C3; } else if (keySize == 24) { CK1 = C2; CK2 = C3; CK3 = C1; } else { CK1 = C3; CK2 = C1; CK3 = C2; } //Calculate round key generators W0,W1,W2,W3 uint8* W0 = KL; uint8 W1[16]; uint8 W2[16]; uint8 W3[16]; uint8 Fres[16];//auxilary array /* W0, W1, W2, W3 are calculated only once and used for all blocks. Since the key data W0 and CK1 are small enough this key generators are calculated in CPU. W1 needed for calc of W2, W2 needed for calc of W3. F0 and FE are also used in the encryption process. */ F0(W0, CK1, Fres); XOR_16(Fres, KR, W1); FE(W1, CK2, Fres); XOR_16(Fres, W0, W2); F0(W2, CK3, Fres); XOR_16(Fres, W1, W3); GenerateRoundKeys(W0, W1, W2, W3); /* Because each thread will process 16 bytes we need textSize/16 threads in total. Then thread number per block is: ceil(textSize/(16*blockSize)) bytes. To decide blockSize we must consider the main occupancy limiter, in this case number of registers per SM. Based on NVIDIA's programming guide Number of 32-bit registers per multiprocessor for compute capability >= 5.0 is 64K. In this code 16 registers used for plaintext, 16 registers auxilary, +1 by itself, each thread uses 33 registers. Then blocksize must be smaller than 64k/33. And larger than 272 since first 272 threads loads the shared memory. 512, 1024 are available blockSizes. 256 can also be tried but number of threads loading the shared memory must be decreased. Keeping the round keys in registers results in low number of warps per SM therefore poor performance. */ int blockSize = 512; int numOfBlocks = ceil((float)(textSize) / (16 * blockSize)); if (workmode == ENCRYPTION)//ENCRYPT { uint8* resCipherText = (uint8*)malloc(textSize); hipMalloc((void**)& deviceArr, textSize); hipMalloc((void**)& ek_d, 272); hipMalloc((void**)& SB_dev, 1024); //START TIMER. using namespace std::chrono; high_resolution_clock::time_point start = high_resolution_clock::now(); hipMemcpy(deviceArr, inputText, textSize, hipMemcpyHostToDevice); hipMemcpy(ek_d, ek, 272, hipMemcpyHostToDevice); //Move Substition layer tables to global memory.(will be moved to shared memory in the kernel.) 
hipMemcpy(SB_dev, SB1, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 256, SB2, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 512, SB3, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 768, SB4, 256, hipMemcpyHostToDevice); Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, ek_d, SB_dev); hipMemcpy(resCipherText, deviceArr, textSize, hipMemcpyDeviceToHost); //END TIMER; PRINT ELAPSED TIME. high_resolution_clock::time_point end = high_resolution_clock::now(); duration<double> timeElapsed = duration_cast<duration<double>>(end - start); std::cout << "Time elapsed: " << timeElapsed.count() << std::endl; //Print/write to file FILE *f = fopen("output.txt", "w"); for (int i = 0; i < textSize; i++) { fprintf(f, "%02x", resCipherText[i]); } fclose(f); //free hipFree(deviceArr); hipFree(ek_d); free(resCipherText); } else //DECRYPT { //Decryption round keys are derived from the encryption round keys which is generated by GenerateRoundKeys. GenerateDecRoundKeys(numOfRounds); uint8* resPlainText = (uint8*)malloc(textSize); hipMalloc((void**)& deviceArr, textSize); hipMalloc((void**)& dk_d, 272); hipMalloc((void**)& SB_dev, 1024); hipMemcpy(deviceArr, inputText, textSize, hipMemcpyHostToDevice); hipMemcpy(dk_d, dk, 272, hipMemcpyHostToDevice); //Move Substition layer tables to global memory.(will be moved to shared memory in the kernel.) hipMemcpy(SB_dev, SB1, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 256, SB2, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 512, SB3, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 768, SB4, 256, hipMemcpyHostToDevice); Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, dk_d, SB_dev); hipMemcpy(resPlainText, deviceArr, textSize, hipMemcpyDeviceToHost); //Print/write to file FILE *f = fopen("output.txt", "w"); for (int i = 0; i < textSize; i++) { fprintf(f, "%02x", resPlainText[i]); } fclose(f); //free hipFree(deviceArr); hipFree(dk_d); free(resPlainText); } return 0; }
33cddf20352d91f0e4f6323889a500d5cef6a58e.cu
/* This version assigns one thread per 16 bytes of text.(one text block) Stores the plaintext/ciphertext in registers. Stores the encryption keys in shared memory. Stores the S-boxes in shared memory. The blocksize is 512. */ #include <iostream> #include <fstream> #include <sstream> #include <chrono> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> typedef unsigned char uint8; enum workMode { ENCRYPTION, DECRYPTION }; //Key generation constants uint8 C1[] = { 0x51,0x7c,0xc1,0xb7,0x27,0x22,0x0a,0x94,0xfe,0x13,0xab,0xe8,0xfa,0x9a,0x6e,0xe0 }; uint8 C2[] = { 0x6d,0xb1,0x4a,0xcc,0x9e,0x21,0xc8,0x20,0xff,0x28,0xb1,0xd5,0xef,0x5d,0xe2,0xb0 }; uint8 C3[] = { 0xdb,0x92,0x37,0x1d,0x21,0x26,0xe9,0x70,0x03,0x24,0x97,0x75,0x04,0xe8,0xc9,0x0e }; //Encryption round keys uint8 ek[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //Decyription round keys uint8 dk[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //S-boxes static const uint8 SB1[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; static const uint8 SB2[256] = { 0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1, 0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1, 0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB, 0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB, 0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD, 0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53, 0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1, 0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40, 0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC, 0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5, 0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 
0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43, 0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8, 0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA, 0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C, 0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D, 0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81 }; static const uint8 SB3[256] = { 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D }; static const uint8 SB4[256] = { 0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C, 0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D, 0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D, 0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED, 0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B, 0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE, 0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9, 0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41, 0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A, 0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7, 0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC, 0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5, 0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45, 0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D, 0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 
0x80, 0xF0, 0x3D, 0xD3, 0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60 }; uint8 hex2dec(char ch) { if (ch >= '0' && ch <= '9') return ch - '0'; else return ch - 'a' + 10; } uint8 leftRotate(uint8 n, uint8 d) { return (n << d) | (n >> (8 - d)); } uint8 rightRotate(uint8 n, uint8 d) { return (n >> d) | (n << (8 - d)); } uint8* RightShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*) malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[arrSize - amount + i]; } for (int i = arrSize - 1; i >= amount; i--) { newArr[i] = arr[i - amount]; } for (int i = 0; i < amount; i++) { newArr[i] = tmp[i]; } free(tmp); return newArr; } uint8* LeftShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*)malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[i]; } for (int i = 0; i < arrSize - amount; i++) { newArr[i] = arr[i + amount]; } for (int i = 0; i < amount; i++) { newArr[arrSize - amount + i] = tmp[i]; } free(tmp); return newArr; } uint8* ShiftArrR(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = RightShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[arrSize - 1] & (0xff >> (8 - amount));//bits that are shifted to byte on right for (int i = 0; i < arrSize; i++) { carryTmp = arr[i] & (0xff >> (8 - amount));//calculate carry for byte on right arr[i] >>= amount;//right shift the current byte. arr[i] |= rightRotate(carry, amount);//place the bits from coming from byte on left carry = carryTmp; } return arr; } uint8* ShiftArrL(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = LeftShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[0] & (0xff << (8 - amount));//bits that are shifted to byte on left for (int i = arrSize - 1; i >= 0; i--) { carryTmp = arr[i] & (0xff << (8 - amount));//calculate carry for byte on left arr[i] <<= amount;//left shift the current byte. 
arr[i] |= leftRotate(carry, amount);//place the bits from coming from byte on right carry = carryTmp; } return arr; } void XOR_16(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } } void XOR_16wFree(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } free(y); } //Substition Layer 1 void SL1(uint8* in, uint8* out) { out[0] = SB1[in[0]]; out[1] = SB2[in[1]]; out[2] = SB3[in[2]]; out[3] = SB4[in[3]]; out[4] = SB1[in[4]]; out[5] = SB2[in[5]]; out[6] = SB3[in[6]]; out[7] = SB4[in[7]]; out[8] = SB1[in[8]]; out[9] = SB2[in[9]]; out[10] = SB3[in[10]]; out[11] = SB4[in[11]]; out[12] = SB1[in[12]]; out[13] = SB2[in[13]]; out[14] = SB3[in[14]]; out[15] = SB4[in[15]]; } //Substition Layer 2(Inverse of SL1) void SL2(uint8* in, uint8* out) { out[0] = SB3[in[0]]; out[1] = SB4[in[1]]; out[2] = SB1[in[2]]; out[3] = SB2[in[3]]; out[4] = SB3[in[4]]; out[5] = SB4[in[5]]; out[6] = SB1[in[6]]; out[7] = SB2[in[7]]; out[8] = SB3[in[8]]; out[9] = SB4[in[9]]; out[10] = SB1[in[10]]; out[11] = SB2[in[11]]; out[12] = SB3[in[12]]; out[13] = SB4[in[13]]; out[14] = SB1[in[14]]; out[15] = SB2[in[15]]; } //Diffusion layer void A(uint8* in, uint8* out) { out[0] = in[3] ^ in[4] ^ in[6] ^ in[8] ^ in[9] ^ in[13] ^ in[14]; out[1] = in[2] ^ in[5] ^ in[7] ^ in[8] ^ in[9] ^ in[12] ^ in[15]; out[2] = in[1] ^ in[4] ^ in[6] ^ in[10] ^ in[11] ^ in[12] ^ in[15]; out[3] = in[0] ^ in[5] ^ in[7] ^ in[10] ^ in[11] ^ in[13] ^ in[14]; out[4] = in[0] ^ in[2] ^ in[5] ^ in[8] ^ in[11] ^ in[14] ^ in[15]; out[5] = in[1] ^ in[3] ^ in[4] ^ in[9] ^ in[10] ^ in[14] ^ in[15]; out[6] = in[0] ^ in[2] ^ in[7] ^ in[9] ^ in[10] ^ in[12] ^ in[13]; out[7] = in[1] ^ in[3] ^ in[6] ^ in[8] ^ in[11] ^ in[12] ^ in[13]; out[8] = in[0] ^ in[1] ^ in[4] ^ in[7] ^ in[10] ^ in[13] ^ in[15]; out[9] = in[0] ^ in[1] ^ in[5] ^ in[6] ^ in[11] ^ in[12] ^ in[14]; out[10] = in[2] ^ in[3] ^ in[5] ^ in[6] ^ in[8] ^ in[13] ^ in[15]; out[11] = in[2] ^ in[3] ^ in[4] ^ in[7] ^ in[9] ^ in[12] ^ in[14]; out[12] = in[1] ^ in[2] ^ in[6] ^ in[7] ^ in[9] ^ in[11] ^ in[12]; out[13] = in[0] ^ in[3] ^ in[6] ^ in[7] ^ in[8] ^ in[10] ^ in[13]; out[14] = in[0] ^ in[3] ^ in[4] ^ in[5] ^ in[9] ^ in[11] ^ in[14]; out[15] = in[1] ^ in[2] ^ in[4] ^ in[5] ^ in[8] ^ in[10] ^ in[15]; } /*Round Functions(F0,FE) takes 16 bytes of plaintext and generates an intermediate val of 16bytes */ //Odd Round Function void F0(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL1(res1, res2); A(res2, out); } //Even Round Function void FE(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL2(res1, res2); A(res2, out); } void GenerateRoundKeys(uint8* W0, uint8* W1, uint8* W2, uint8* W3) { //Producing encryption round keys //Producing encryption round keys can be parallelized. //However since we do this once for all blocks, it is faster to compute in CPU. //ShiftArr functions return array from heap, must free. 
XOR_16wFree(W0, ShiftArrR(W1, 19), &ek[0]); XOR_16wFree(W1, ShiftArrR(W2, 19), &ek[16]); XOR_16wFree(W2, ShiftArrR(W3, 19), &ek[32]); XOR_16wFree(W3, ShiftArrR(W0, 19), &ek[48]); XOR_16wFree(W0, ShiftArrR(W1, 31), &ek[64]); XOR_16wFree(W1, ShiftArrR(W2, 31), &ek[80]); XOR_16wFree(W2, ShiftArrR(W3, 31), &ek[96]); XOR_16wFree(W3, ShiftArrR(W0, 31), &ek[112]); XOR_16wFree(W0, ShiftArrL(W1, 61), &ek[128]); XOR_16wFree(W1, ShiftArrL(W2, 61), &ek[144]); XOR_16wFree(W2, ShiftArrL(W3, 61), &ek[160]); XOR_16wFree(W3, ShiftArrL(W0, 61), &ek[176]); XOR_16wFree(W0, ShiftArrL(W1, 31), &ek[192]); XOR_16wFree(W1, ShiftArrL(W2, 31), &ek[208]); XOR_16wFree(W2, ShiftArrL(W3, 31), &ek[224]); XOR_16wFree(W3, ShiftArrL(W0, 31), &ek[240]); XOR_16wFree(W0, ShiftArrL(W1, 19), &ek[256]); } void GenerateDecRoundKeys(uint8 numOfRounds) { int N = numOfRounds - 1; int k = 1; for (int i = 0; i < 16; i++) { dk[i] = ek[16 * N + i]; } for (int i = N - 1; i >= 1; i--) { A(&ek[i * 16], &dk[k * 16]); k++; } for (int i = 0; i < 16; i++) { dk[k * 16 + i] = ek[i]; } } //Odd Round Function __device__ void F0_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL1) D[0] = SB1[D[0]]; D[1] = SB2[D[1]]; D[2] = SB3[D[2]]; D[3] = SB4[D[3]]; D[4] = SB1[D[4]]; D[5] = SB2[D[5]]; D[6] = SB3[D[6]]; D[7] = SB4[D[7]]; D[8] = SB1[D[8]]; D[9] = SB2[D[9]]; D[10] = SB3[D[10]]; D[11] = SB4[D[11]]; D[12] = SB1[D[12]]; D[13] = SB2[D[13]]; D[14] = SB3[D[14]]; D[15] = SB4[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } //Even Round Function __device__ void FE_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL2) D[0] = SB3[D[0]]; D[1] = SB4[D[1]]; D[2] = SB1[D[2]]; D[3] = SB2[D[3]]; D[4] = SB3[D[4]]; D[5] = SB4[D[5]]; D[6] = SB1[D[6]]; D[7] = SB2[D[7]]; D[8] = SB3[D[8]]; D[9] = SB4[D[9]]; D[10] = SB1[D[10]]; D[11] = SB2[D[11]]; D[12] = SB3[D[12]]; D[13] = SB4[D[13]]; D[14] = SB1[D[14]]; D[15] = SB2[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; 
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } template <unsigned int keySize> __global__ void Encrypt(uint8* plainText, unsigned long textSize, uint8* ek, uint8* SB_gmem) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; uint8 plainTextR[16];//registers keeping the plaintext. __shared__ uint8 keySmem[272];//each round key is 16 bytes, there are 17 round keys 272 bytes __shared__ uint8 SB1[256]; __shared__ uint8 SB2[256]; __shared__ uint8 SB3[256]; __shared__ uint8 SB4[256]; //Load encryption round keys to shared memory. if (tid < 272) { keySmem[tid] = ek[tid]; } //Load SB tables to shared memory. if (tid < 256) { SB1[tid] = SB_gmem[tid]; SB2[tid] = SB_gmem[tid + 256]; SB3[tid] = SB_gmem[tid + 512]; SB4[tid] = SB_gmem[tid + 768]; } //Load the plaintext to registers for (int i = 0; i < 16; i++) { plainTextR[i] = plainText[16 * idx + i]; } __syncthreads(); if (keySize == 16)//128-bit keys { F0_d(plainTextR, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(plainTextR, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[160], SB1, SB2, SB3, SB4);//...ek11 #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[176 + i];//ek12 } plainTextR[0] = SB3[plainTextR[0]]; plainTextR[1] = SB4[plainTextR[1]]; plainTextR[2] = SB1[plainTextR[2]]; plainTextR[3] = SB2[plainTextR[3]]; plainTextR[4] = SB3[plainTextR[4]]; plainTextR[5] = SB4[plainTextR[5]]; plainTextR[6] = SB1[plainTextR[6]]; plainTextR[7] = SB2[plainTextR[7]]; plainTextR[8] = SB3[plainTextR[8]]; plainTextR[9] = SB4[plainTextR[9]]; plainTextR[10] = SB1[plainTextR[10]]; plainTextR[11] = SB2[plainTextR[11]]; plainTextR[12] = SB3[plainTextR[12]]; plainTextR[13] = SB4[plainTextR[13]]; plainTextR[14] = SB1[plainTextR[14]]; plainTextR[15] = SB2[plainTextR[15]]; #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[192 + i];//ek13 } //Write back to global memory for (int i = 0; i < 16; i++) { plainText[16 * idx + i] = plainTextR[i]; } } else if (keySize == 24)//192-bit keys { F0_d(plainTextR, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... 
FE_d(plainTextR, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[192], SB1, SB2, SB3, SB4);//ek13 #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[208 + i];//ek14 } plainTextR[0] = SB3[plainTextR[0]]; plainTextR[1] = SB4[plainTextR[1]]; plainTextR[2] = SB1[plainTextR[2]]; plainTextR[3] = SB2[plainTextR[3]]; plainTextR[4] = SB3[plainTextR[4]]; plainTextR[5] = SB4[plainTextR[5]]; plainTextR[6] = SB1[plainTextR[6]]; plainTextR[7] = SB2[plainTextR[7]]; plainTextR[8] = SB3[plainTextR[8]]; plainTextR[9] = SB4[plainTextR[9]]; plainTextR[10] = SB1[plainTextR[10]]; plainTextR[11] = SB2[plainTextR[11]]; plainTextR[12] = SB3[plainTextR[12]]; plainTextR[13] = SB4[plainTextR[13]]; plainTextR[14] = SB1[plainTextR[14]]; plainTextR[15] = SB2[plainTextR[15]]; #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[224 + i];//ek15 } //Write back to global memory for (int i = 0; i < 16; i++) { plainText[16 * idx + i] = plainTextR[i]; } } else//256-bit keys { F0_d(plainTextR, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(plainTextR, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[192], SB1, SB2, SB3, SB4); FE_d(plainTextR, &keySmem[208], SB1, SB2, SB3, SB4); F0_d(plainTextR, &keySmem[224], SB1, SB2, SB3, SB4);//ek15 #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[240 + i];//ek16 } plainTextR[0] = SB3[plainTextR[0]]; plainTextR[1] = SB4[plainTextR[1]]; plainTextR[2] = SB1[plainTextR[2]]; plainTextR[3] = SB2[plainTextR[3]]; plainTextR[4] = SB3[plainTextR[4]]; plainTextR[5] = SB4[plainTextR[5]]; plainTextR[6] = SB1[plainTextR[6]]; plainTextR[7] = SB2[plainTextR[7]]; plainTextR[8] = SB3[plainTextR[8]]; plainTextR[9] = SB4[plainTextR[9]]; plainTextR[10] = SB1[plainTextR[10]]; plainTextR[11] = SB2[plainTextR[11]]; plainTextR[12] = SB3[plainTextR[12]]; plainTextR[13] = SB4[plainTextR[13]]; plainTextR[14] = SB1[plainTextR[14]]; plainTextR[15] = SB2[plainTextR[15]]; #pragma unroll for (int i = 0; i < 16; i++) { plainTextR[i] = plainTextR[i] ^ keySmem[256 + i];//ek17 } //Write back to global memory for (int i = 0; i < 16; i++) { plainText[16 * idx + i] = plainTextR[i]; } } } int main(void) { /////////INPUT PART BEGIN////////////////////// enum workMode workmode = ENCRYPTION; //Device pointers: uint8* deviceArr, *ek_d, *dk_d, *SB_dev; FILE *file; uint8* inputText;//either Plaintext or Ciphertext based on workmode; unsigned long int fileLen, textSize; uint8 
numOfRounds; const uint8 keySize = 32; uint8 key[32] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}; file = fopen("../input.txt", "r"); if (file) { char buf[2]; fseek(file, 0, SEEK_END); fileLen = ftell(file); fseek(file, 0, SEEK_SET); textSize = fileLen / 2; inputText = (uint8*)malloc(textSize); for (int i = 0; i < textSize; i++) { buf[0] = fgetc(file); buf[1] = fgetc(file); uint8 hexVal = (uint8)strtol(buf, NULL, 16); inputText[i] = hexVal; } } else { printf("File not found.\n"); return -1; } /////////INPUT PART END////////////////////// if (keySize == 16) numOfRounds = 13; else if (keySize == 24) numOfRounds = 15; else numOfRounds = 17; uint8 KL[16];//KL = leftmost 16 bytes of key uint8 KR[16];//KR = rightmost 16 bytes of key /* Most significant byte is stored in 0th index. KL = leftmost 16 bytes of key KR = rightmost 16 bytes of key */ for (int i = 0; i < 16; i++) { KL[i] = key[i]; } for (int i = 0; i < 16; i++) { KR[i] = key[i + 16]; } uint8* CK1, *CK2, *CK3; if (keySize == 16) { CK1 = C1; CK2 = C2; CK3 = C3; } else if (keySize == 24) { CK1 = C2; CK2 = C3; CK3 = C1; } else { CK1 = C3; CK2 = C1; CK3 = C2; } //Calculate round key generators W0,W1,W2,W3 uint8* W0 = KL; uint8 W1[16]; uint8 W2[16]; uint8 W3[16]; uint8 Fres[16];//auxilary array /* W0, W1, W2, W3 are calculated only once and used for all blocks. Since the key data W0 and CK1 are small enough this key generators are calculated in CPU. W1 needed for calc of W2, W2 needed for calc of W3. F0 and FE are also used in the encryption process. */ F0(W0, CK1, Fres); XOR_16(Fres, KR, W1); FE(W1, CK2, Fres); XOR_16(Fres, W0, W2); F0(W2, CK3, Fres); XOR_16(Fres, W1, W3); GenerateRoundKeys(W0, W1, W2, W3); /* Because each thread will process 16 bytes we need textSize/16 threads in total. Then thread number per block is: ceil(textSize/(16*blockSize)) bytes. To decide blockSize we must consider the main occupancy limiter, in this case number of registers per SM. Based on NVIDIA's programming guide Number of 32-bit registers per multiprocessor for compute capability >= 5.0 is 64K. In this code 16 registers used for plaintext, 16 registers auxilary, +1 by itself, each thread uses 33 registers. Then blocksize must be smaller than 64k/33. And larger than 272 since first 272 threads loads the shared memory. 512, 1024 are available blockSizes. 256 can also be tried but number of threads loading the shared memory must be decreased. Keeping the round keys in registers results in low number of warps per SM therefore poor performance. */ int blockSize = 512; int numOfBlocks = ceil((float)(textSize) / (16 * blockSize)); if (workmode == ENCRYPTION)//ENCRYPT { uint8* resCipherText = (uint8*)malloc(textSize); cudaMalloc((void**)& deviceArr, textSize); cudaMalloc((void**)& ek_d, 272); cudaMalloc((void**)& SB_dev, 1024); //START TIMER. using namespace std::chrono; high_resolution_clock::time_point start = high_resolution_clock::now(); cudaMemcpy(deviceArr, inputText, textSize, cudaMemcpyHostToDevice); cudaMemcpy(ek_d, ek, 272, cudaMemcpyHostToDevice); //Move Substition layer tables to global memory.(will be moved to shared memory in the kernel.) 
cudaMemcpy(SB_dev, SB1, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 256, SB2, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 512, SB3, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 768, SB4, 256, cudaMemcpyHostToDevice); Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, ek_d, SB_dev); cudaMemcpy(resCipherText, deviceArr, textSize, cudaMemcpyDeviceToHost); //END TIMER; PRINT ELAPSED TIME. high_resolution_clock::time_point end = high_resolution_clock::now(); duration<double> timeElapsed = duration_cast<duration<double>>(end - start); std::cout << "Time elapsed: " << timeElapsed.count() << std::endl; //Print/write to file FILE *f = fopen("output.txt", "w"); for (int i = 0; i < textSize; i++) { fprintf(f, "%02x", resCipherText[i]); } fclose(f); //free cudaFree(deviceArr); cudaFree(ek_d); free(resCipherText); } else //DECRYPT { //Decryption round keys are derived from the encryption round keys which is generated by GenerateRoundKeys. GenerateDecRoundKeys(numOfRounds); uint8* resPlainText = (uint8*)malloc(textSize); cudaMalloc((void**)& deviceArr, textSize); cudaMalloc((void**)& dk_d, 272); cudaMalloc((void**)& SB_dev, 1024); cudaMemcpy(deviceArr, inputText, textSize, cudaMemcpyHostToDevice); cudaMemcpy(dk_d, dk, 272, cudaMemcpyHostToDevice); //Move Substition layer tables to global memory.(will be moved to shared memory in the kernel.) cudaMemcpy(SB_dev, SB1, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 256, SB2, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 512, SB3, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 768, SB4, 256, cudaMemcpyHostToDevice); Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, dk_d, SB_dev); cudaMemcpy(resPlainText, deviceArr, textSize, cudaMemcpyDeviceToHost); //Print/write to file FILE *f = fopen("output.txt", "w"); for (int i = 0; i < textSize; i++) { fprintf(f, "%02x", resPlainText[i]); } fclose(f); //free cudaFree(deviceArr); cudaFree(dk_d); free(resPlainText); } return 0; }
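Reading aid, not part of the original project: the occupancy comment just before the Encrypt launch packs the whole launch-configuration argument into prose. The small standalone C++ sketch below reruns that arithmetic; the 1 MiB textSize, the printf output and the variable names outside blockSize/numOfBlocks are invented for illustration.

#include <cmath>
#include <cstdio>

int main() {
    // Register-limited occupancy bound from the comment above the Encrypt launch:
    // 64K 32-bit registers per SM (compute capability >= 5.0), about 33 registers
    // per thread (16 plaintext + 16 auxiliary + 1), so at most 64K/33 resident threads.
    const int regsPerSM = 64 * 1024;
    const int regsPerThread = 33;
    const int maxResidentThreads = regsPerSM / regsPerThread;            // 1985

    // The block must also hold at least 272 threads so the round keys (272 bytes)
    // and the four 256-byte S-boxes can be staged into shared memory.
    const int blockSize = 512;                                           // 512 or 1024 both satisfy the bounds
    std::printf("272 <= %d <= %d : %s\n", blockSize, maxResidentThreads,
                (blockSize >= 272 && blockSize <= maxResidentThreads) ? "ok" : "not ok");

    // Each thread encrypts one 16-byte block, hence the grid size used in main():
    const unsigned long textSize = 1UL << 20;                            // hypothetical 1 MiB input
    const int numOfBlocks = (int)std::ceil((float)textSize / (16 * blockSize));
    std::printf("blockSize=%d numOfBlocks=%d\n", blockSize, numOfBlocks);
    return 0;
}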
363d5ec0edbd76fde9f0c44affe1e7de573f7882.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <stdlib.h> #include <cmath> #include "../parser.h" #include "../Method/mc.h" #include "cudaLib.h" #include "pricer_kernel.cuh" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <sys/time.h> int main(int argc, char ** argv) { const char *infile = argv[1]; Param *P = new Parser(infile); MonteCarlo *mc = new MonteCarlo(P); //Creation du CudaLib permettant d'allouer la mmoire ncessaire sur le GPU et faisant les copies CudaLib* cudaL = new CudaLib(mc); //Alloc des tats dans le GPU hiprandState_t* devStates; hipMalloc ( &devStates, (mc->samples_)*sizeof( hiprandState_t ) ); /// Initialise la grille et les dimensions de chaque bloc dim3 DimGrid(mc->samples_/cudaL->maxDevice,1,1); dim3 DimBlock(cudaL->maxDevice,1,1); //Appel du noyau hipLaunchKernelGGL(( priceGPU) , dim3(DimGrid), dim3(DimBlock), 0, 0, cudaL->tabPrice, cudaL->tabVar, cudaL->tabPath, mc->mod_->size_, mc->mod_->r_, cudaL->spot, cudaL->sigma, cudaL->chol, mc->opt_->T_, mc->opt_->TimeSteps_, cudaL->payoffCoeff, cudaL->lowerBarrier, cudaL->upperBarrier, cudaL->strike, mc->opt_->id_, devStates, cudaL->maxDevice, unsigned(time(NULL))); /* * Rduction */ //Constantes dfinissant la grille utiliser pour l'allocation de la grille int num_elements = mc->samples_; size_t block_size = cudaL->maxDevice; size_t num_blocks = mc->samples_/cudaL->maxDevice; //Allocation des variables qui contiendront les rsultats des rductions float *d_partial_sums_and_total_price; float *device_result_price; float *d_partial_sums_and_total_var; float *device_result_var; hipMalloc((void**)&d_partial_sums_and_total_price, sizeof(float) * num_blocks); hipMalloc((void**)&device_result_price, sizeof(float)); hipMalloc((void**)&d_partial_sums_and_total_var, sizeof(float) * num_blocks); hipMalloc((void**)&device_result_var, sizeof(float)); float payoffReduction = 0.0; float payoffSquareReduction = 0.0; float host_result_price = 0.0; float host_result_var = 0.0; int puissance = (int)(log(num_elements)/log(2)); float *host_tab_price = new float[num_blocks]; float *host_tab_var = new float[num_blocks]; while( puissance >= 9){ // car 2^9 = 512 // launch one kernel to compute, per-block, a partial sum hipLaunchKernelGGL(( block_sum), dim3(num_blocks),dim3(block_size),block_size * sizeof(float), 0, cudaL->tabPrice + (mc->samples_ - num_elements), d_partial_sums_and_total_price, num_elements); hipLaunchKernelGGL(( block_sum), dim3(num_blocks),dim3(block_size),block_size * sizeof(float), 0, cudaL->tabVar + (mc->samples_ - num_elements), d_partial_sums_and_total_var, num_elements); int blocks = (int)(pow(2.0,puissance))/cudaL->maxDevice; hipMemcpy(host_tab_price, d_partial_sums_and_total_price, sizeof(float)*blocks, hipMemcpyDeviceToHost); hipMemcpy(host_tab_var, d_partial_sums_and_total_var, sizeof(float)*blocks, hipMemcpyDeviceToHost); // copy the result back to the host host_result_price = 0.0; host_result_var = 0.0; for(int i = 0; i<blocks; i++){ host_result_price += host_tab_price[i]; host_result_var += host_tab_var[i]; } payoffReduction += host_result_price; payoffSquareReduction += host_result_var; num_elements -= (int)(pow(2.0,puissance)); num_blocks = num_elements/cudaL->maxDevice; puissance = (int)(log(num_elements)/log(2)); } payoffReduction /= mc->samples_; payoffSquareReduction /= mc->samples_; float coeffActu = exp(-mc->mod_->r_*mc->opt_->T_); float varEstimator = exp(- 2 * (mc->mod_->r_ * mc->opt_->T_)) * 
(payoffSquareReduction - (payoffReduction*payoffReduction)); float prixFin = payoffReduction*coeffActu; float ic = 2 * 1.96 * sqrt(varEstimator)/sqrt(mc->samples_); std::cout<<"Prix : "<<prixFin<<std::endl; std::cout<<"IC : "<<ic<<std::endl; // deallocate device memory hipFree(d_partial_sums_and_total_price); hipFree(device_result_price); hipFree(d_partial_sums_and_total_var); hipFree(device_result_var); delete host_tab_price; delete host_tab_var; delete P; delete mc; delete cudaL; return 0; }
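Sketch of my reading of the host-side reduction loop above, not project code: the sample array is consumed in power-of-two chunks of at least 2^9 elements, each chunk is block-reduced on the GPU, and the per-block partial sums are finished on the CPU. Here the block_sum launch is replaced by a plain loop; the maxDevice value and the array size are invented.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int maxDevice = 512;                    // threads per block in the original launch
    std::vector<float> payoff(20000, 1.0f);       // stand-in for cudaL->tabPrice
    int numElements = (int)payoff.size();
    int offset = 0;
    double total = 0.0;

    int puissance = (int)(std::log((double)numElements) / std::log(2.0));
    while (puissance >= 9) {                      // 2^9 = 512: at least one full block
        const int chunk = 1 << puissance;         // elements consumed this pass
        const int blocks = chunk / maxDevice;
        // block_sum<<<blocks, maxDevice>>> would leave `blocks` partial sums in
        // device memory; the CPU stand-in sums the same region directly.
        for (int b = 0; b < blocks; ++b)
            for (int i = 0; i < maxDevice; ++i)
                total += payoff[offset + b * maxDevice + i];
        offset += chunk;
        numElements -= chunk;
        puissance = numElements > 0 ? (int)(std::log((double)numElements) / std::log(2.0)) : 0;
    }
    // Anything below 2^9 elements is not consumed by this loop.
    std::printf("partial-sum total = %.1f, unconsumed tail = %d elements\n", total, numElements);
    return 0;
}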
363d5ec0edbd76fde9f0c44affe1e7de573f7882.cu
#include <stdio.h> #include <iostream> #include <stdlib.h> #include <cmath> #include "../parser.h" #include "../Method/mc.h" #include "cudaLib.h" #include "pricer_kernel.cuh" #include <curand.h> #include <curand_kernel.h> #include <sys/time.h> int main(int argc, char ** argv) { const char *infile = argv[1]; Param *P = new Parser(infile); MonteCarlo *mc = new MonteCarlo(P); //Creation du CudaLib permettant d'allouer la mémoire nécessaire sur le GPU et faisant les copies CudaLib* cudaL = new CudaLib(mc); //Alloc des états dans le GPU curandState* devStates; cudaMalloc ( &devStates, (mc->samples_)*sizeof( curandState ) ); /// Initialise la grille et les dimensions de chaque bloc dim3 DimGrid(mc->samples_/cudaL->maxDevice,1,1); dim3 DimBlock(cudaL->maxDevice,1,1); //Appel du noyau priceGPU <<<DimGrid, DimBlock>>>(cudaL->tabPrice, cudaL->tabVar, cudaL->tabPath, mc->mod_->size_, mc->mod_->r_, cudaL->spot, cudaL->sigma, cudaL->chol, mc->opt_->T_, mc->opt_->TimeSteps_, cudaL->payoffCoeff, cudaL->lowerBarrier, cudaL->upperBarrier, cudaL->strike, mc->opt_->id_, devStates, cudaL->maxDevice, unsigned(time(NULL))); /* * Réduction */ //Constantes définissant la grille à utiliser pour l'allocation de la grille int num_elements = mc->samples_; size_t block_size = cudaL->maxDevice; size_t num_blocks = mc->samples_/cudaL->maxDevice; //Allocation des variables qui contiendront les résultats des réductions float *d_partial_sums_and_total_price; float *device_result_price; float *d_partial_sums_and_total_var; float *device_result_var; cudaMalloc((void**)&d_partial_sums_and_total_price, sizeof(float) * num_blocks); cudaMalloc((void**)&device_result_price, sizeof(float)); cudaMalloc((void**)&d_partial_sums_and_total_var, sizeof(float) * num_blocks); cudaMalloc((void**)&device_result_var, sizeof(float)); float payoffReduction = 0.0; float payoffSquareReduction = 0.0; float host_result_price = 0.0; float host_result_var = 0.0; int puissance = (int)(log(num_elements)/log(2)); float *host_tab_price = new float[num_blocks]; float *host_tab_var = new float[num_blocks]; while( puissance >= 9){ // car 2^9 = 512 // launch one kernel to compute, per-block, a partial sum block_sum<<<num_blocks,block_size,block_size * sizeof(float)>>>(cudaL->tabPrice + (mc->samples_ - num_elements), d_partial_sums_and_total_price, num_elements); block_sum<<<num_blocks,block_size,block_size * sizeof(float)>>>(cudaL->tabVar + (mc->samples_ - num_elements), d_partial_sums_and_total_var, num_elements); int blocks = (int)(pow(2.0,puissance))/cudaL->maxDevice; cudaMemcpy(host_tab_price, d_partial_sums_and_total_price, sizeof(float)*blocks, cudaMemcpyDeviceToHost); cudaMemcpy(host_tab_var, d_partial_sums_and_total_var, sizeof(float)*blocks, cudaMemcpyDeviceToHost); // copy the result back to the host host_result_price = 0.0; host_result_var = 0.0; for(int i = 0; i<blocks; i++){ host_result_price += host_tab_price[i]; host_result_var += host_tab_var[i]; } payoffReduction += host_result_price; payoffSquareReduction += host_result_var; num_elements -= (int)(pow(2.0,puissance)); num_blocks = num_elements/cudaL->maxDevice; puissance = (int)(log(num_elements)/log(2)); } payoffReduction /= mc->samples_; payoffSquareReduction /= mc->samples_; float coeffActu = exp(-mc->mod_->r_*mc->opt_->T_); float varEstimator = exp(- 2 * (mc->mod_->r_ * mc->opt_->T_)) * (payoffSquareReduction - (payoffReduction*payoffReduction)); float prixFin = payoffReduction*coeffActu; float ic = 2 * 1.96 * sqrt(varEstimator)/sqrt(mc->samples_); std::cout<<"Prix : 
"<<prixFin<<std::endl; std::cout<<"IC : "<<ic<<std::endl; // deallocate device memory cudaFree(d_partial_sums_and_total_price); cudaFree(device_result_price); cudaFree(d_partial_sums_and_total_var); cudaFree(device_result_var); delete host_tab_price; delete host_tab_var; delete P; delete mc; delete cudaL; return 0; }
9f25a17ff00e630944162632ee9f23aef88f7479.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CHANNELWISE_SOFTMAX_LAYER_INSTANTIATE #include "lbann/layers/misc/channelwise_softmax.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { using Size3 = gpu_lib::array<size_t,3>; /** @brief Max functor */ template <class T> struct max_op { __device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const { return gpu_lib::max(x1, x2); } }; } // namespace <anon> // ========================================================= // Forward prop // ========================================================= namespace { /** @brief Max reduction over last dimension of 3D tensor. * * Each CUDA block computes the max over a subset of tensor entries * in @c vals and outputs the result to @c maxvals. This should be * repeated multiple times to fully reduce the last tensor dimension. 
* * Block dimensions: bdimx x 1 x 1 * * Grid dimensions: (vals_dims[2] / bdimx) x vals_dims[1] x vals_dims[0] * * maxvals: vals_dims[0] x vals_dims[1] x (vals_dims[2] / bdimx) */ template <typename TensorDataType, size_t bdimx> __global__ void fp_max_kernel( Size3 vals_dims, const TensorDataType* __restrict__ vals_buffer, Size3 vals_strides, TensorDataType* __restrict__ maxvals_buffer, Size3 maxvals_strides) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x; const size_t bidx = blockIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < vals_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < vals_dims[1]; j += nthreadsy) { // Find largest value for each thread TensorDataType maxval{-gpu_lib::infinity<TensorDataType>()}; for (size_t i = gidx; i < vals_dims[2]; i += nthreadsx) { const auto& val = vals_buffer[k * vals_strides[0] + j * vals_strides[1] + i * vals_strides[2]]; maxval = gpu_lib::max(maxval, val); } // Find largest value for each block maxval = gpu_lib::block_reduce<bdimx,bdimy,bdimz,TensorDataType,max_op<TensorDataType>>(maxval); if (tid == 0) { const auto& pos = (k * maxvals_strides[0] + j * maxvals_strides[1] + bidx * maxvals_strides[2]); maxvals_buffer[pos] = maxval; } } } } /** Compute softmax denominator. * * denom = sum( exp(x_i-shift) ) * * Block dimensions: bdimx x 1 x 1 * * Grid dimensions: (input_dims[2] / bdimx) x input_dims[1] x input_dims[0] * * shifts and denoms are fully-packed 2D tensors with dimensions of * input_dims[0] x input_dims[1]. */ template <typename TensorDataType, size_t bdimx> __global__ void fp_denom_kernel( Size3 input_dims, const TensorDataType* __restrict__ input_buffer, Size3 input_strides, const TensorDataType* __restrict__ shifts, TensorDataType* __restrict__ denoms) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < input_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < input_dims[1]; j += nthreadsy) { // Compute contribution from each thread const auto& shift = shifts[j + k*input_dims[1]]; TensorDataType denom{0.}; for (size_t i = gidx; i < input_dims[2]; i += nthreadsx) { const auto& x = input_buffer[k * input_strides[0] + j * input_strides[1] + i * input_strides[2]]; denom += gpu_lib::exp(x-shift); } // Compute contribution from each block denom = gpu_lib::block_reduce<bdimx,bdimy,bdimz>(denom); if (tid == 0) { gpu_lib::atomic_add(&denoms[j+k*input_dims[1]], denom); } } } } /** Compute softmax. * * y_i = exp(x_i-shift) / denom * * Block dimensions: bdimx x bdimy x bdimz * * Grid dimensions: (input_dims[2] / bdimx) x (input_dims[1] / bdimy) x (input_dims[0] / bdimz) * * shifts and denoms are fully-packed 2D tensors with dimensions of * input_dims[0] x input_dims[1]. 
*/ template <typename TensorDataType> __global__ void fp_output_kernel( Size3 input_dims, const TensorDataType* __restrict__ input_buffer, Size3 input_strides, TensorDataType* __restrict__ output_buffer, Size3 output_strides, const TensorDataType* __restrict__ shifts, const TensorDataType* __restrict__ denoms) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < input_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < input_dims[1]; j += nthreadsy) { const auto& shift = shifts[j + k*input_dims[1]]; const auto& denom = denoms[j + k*input_dims[1]]; for (size_t i = gidx; i < input_dims[2]; i += nthreadsx) { const auto& x = input_buffer[k * input_strides[0] + j * input_strides[1] + i * input_strides[2]]; auto& y = output_buffer[k * output_strides[0] + j * output_strides[1] + i * output_strides[2]]; y = gpu_lib::exp(x-shift) / denom; } } } } /** @brief Forward prop */ template <typename TensorDataType> void fp_impl(size_t num_channels, size_t channel_size, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& output) { // Local matrices using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>; const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix()); auto& local_output = dynamic_cast<LocalMat&>(output.Matrix()); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_input)); // Dimensions const size_t local_mini_batch_size = local_input.Width(); // const Size3 input_dims{local_mini_batch_size, num_channels, channel_size}; // Compute softmax shifts LocalMat local_shifts; if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); LocalMat maxvals(grid_dims.x * num_channels, local_mini_batch_size); hydrogen::gpu::LaunchKernel( fp_max_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_input.LockedBuffer(), Size3{static_cast<size_t>(local_input.LDim()), channel_size, 1}, maxvals.Buffer(), Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1}); while (grid_dims.x > 1) { const size_t prev_dim = grid_dims.x; grid_dims.x = (prev_dim + block_size - 1) / block_size; const LocalMat prev_maxvals(std::move(maxvals)); maxvals.Resize(grid_dims.x * num_channels, local_mini_batch_size); hydrogen::gpu::LaunchKernel( fp_max_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, prev_dim}, prev_maxvals.LockedBuffer(), Size3{static_cast<size_t>(prev_maxvals.LDim()), prev_dim, 1}, maxvals.Buffer(), Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1}); } local_shifts = std::move(maxvals); } // Compute softmax denominators LocalMat local_denoms(num_channels, local_mini_batch_size); El::Zero(local_denoms); if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; 
gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_denom_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_input.LockedBuffer(), Size3{static_cast<size_t>(local_input.LDim()), channel_size, 1}, local_shifts.LockedBuffer(), local_denoms.Buffer()); } // Compute softmax if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_input.LockedBuffer(), Size3{static_cast<size_t>(local_input.LDim()), channel_size, 1}, local_output.Buffer(), Size3{static_cast<size_t>(local_output.LDim()), channel_size, 1}, local_shifts.LockedBuffer(), local_denoms.LockedBuffer()); } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_softmax_layer<TensorDataType,Layout,Device>::fp_compute() { const size_t num_channels = this->get_output_dims().front(); const size_t channel_size = this->get_output_size() / num_channels; fp_impl(num_channels, channel_size, this->get_prev_activations(), this->get_activations()); } // ========================================================= // Backprop // ========================================================= namespace { /** Compute dot product between output and gradient w.r.t. output. * * Block dimensions: bdimx x 1 x 1 * * Grid dimensions: (output_dims[2] / bdimx) x output_dims[1] x output_dims[0] * * y_dot_dy is a fully-packed 2D tensor with dimensions of * output_dims[0] x output_dims[1]. */ template <typename TensorDataType, size_t bdimx> __global__ void bp_y_dot_dy_kernel( Size3 output_dims, const TensorDataType* __restrict__ output_buffer, Size3 output_strides, const TensorDataType* __restrict__ output_grad_buffer, Size3 output_grad_strides, TensorDataType* __restrict__ y_dot_dy) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < output_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < output_dims[1]; j += nthreadsy) { // Compute contribution from each thread TensorDataType _y_dot_dy{0.}; for (size_t i = gidx; i < output_dims[2]; i += nthreadsx) { const auto& y = output_buffer[k * output_strides[0] + j * output_strides[1] + i * output_strides[2]]; const auto& dy = output_grad_buffer[k * output_grad_strides[0] + j * output_grad_strides[1] + i * output_grad_strides[2]]; _y_dot_dy += y * dy; } // Compute contribution from each block _y_dot_dy = gpu_lib::block_reduce<bdimx,bdimy,bdimz>(_y_dot_dy); if (tid == 0) { gpu_lib::atomic_add(&y_dot_dy[j+k*output_dims[1]], _y_dot_dy); } } } } /** Compute gradient w.r.t. input. 
* * dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) ) * * Block dimensions: bdimx x bdimy x bdimz * * Grid dimensions: (output_dims[2] / bdimx) x (output_dims[1] / bdimy) x (output_dims[0] / bdimz) * * y_dot_dy is a fully-packed 2D tensor with dimensions of * output_dims[0] x output_dims[1]. */ template <typename TensorDataType> __global__ void bp_input_grad_kernel( Size3 output_dims, const TensorDataType* __restrict__ output_buffer, Size3 output_strides, const TensorDataType* __restrict__ output_grad_buffer, Size3 output_grad_strides, TensorDataType* __restrict__ input_grad_buffer, Size3 input_grad_strides, const TensorDataType* __restrict__ y_dot_dy) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < output_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < output_dims[1]; j += nthreadsy) { const auto& _y_dot_dy = y_dot_dy[j + k*output_dims[1]]; for (size_t i = gidx; i < output_dims[2]; i += nthreadsx) { const auto& y = output_buffer[k * output_strides[0] + j * output_strides[1] + i * output_strides[2]]; const auto& dy = output_grad_buffer[k * output_grad_strides[0] + j * output_grad_strides[1] + i * output_grad_strides[2]]; auto& dx = input_grad_buffer[k * input_grad_strides[0] + j * input_grad_strides[1] + i * input_grad_strides[2]]; dx = y * (dy - _y_dot_dy); } } } } /** @brief Backprop */ template <typename TensorDataType> void bp_impl(size_t num_channels, size_t channel_size, const El::AbstractDistMatrix<TensorDataType>& output, const El::AbstractDistMatrix<TensorDataType>& output_grad, El::AbstractDistMatrix<TensorDataType>& input_grad) { // Local matrices using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>; const auto& local_output = dynamic_cast<const LocalMat&>(output.LockedMatrix()); const auto& local_output_grad = dynamic_cast<const LocalMat&>(output_grad.LockedMatrix()); auto& local_input_grad = dynamic_cast<LocalMat&>(input_grad.Matrix()); // Dimensions const size_t local_mini_batch_size = local_output.Width(); // dot(y,dL/dy) LocalMat local_y_dot_dy(num_channels, local_mini_batch_size); El::Zero(local_y_dot_dy); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_y_dot_dy), gpu::get_sync_info(local_output_grad), gpu::get_sync_info(local_output), gpu::get_sync_info(local_input_grad)); if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( bp_y_dot_dy_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_output.LockedBuffer(), Size3{static_cast<size_t>(local_output.LDim()), channel_size, 1}, local_output_grad.LockedBuffer(), Size3{static_cast<size_t>(local_output_grad.LDim()), channel_size, 1}, local_y_dot_dy.Buffer()); } // Compute gradient w.r.t. 
input if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( bp_input_grad_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_output.LockedBuffer(), Size3{static_cast<size_t>(local_output.LDim()), channel_size, 1}, local_output_grad.LockedBuffer(), Size3{static_cast<size_t>(local_output_grad.LDim()), channel_size, 1}, local_input_grad.Buffer(), Size3{static_cast<size_t>(local_input_grad.LDim()), channel_size, 1}, local_y_dot_dy.LockedBuffer()); } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_softmax_layer<TensorDataType,Layout,Device>::bp_compute() { const size_t num_channels = this->get_output_dims().front(); const size_t channel_size = this->get_output_size() / num_channels; bp_impl(num_channels, channel_size, this->get_activations(), this->get_prev_error_signals(), this->get_error_signals()); } // ========================================================= // Explicit template instantiation // ========================================================= #define PROTO(T) \ template class channelwise_softmax_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; #include "lbann/macros/instantiate.hpp" } // namespace lbann
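The forward path in this file is a three-kernel pipeline per (sample, channel): a multi-pass channel max (fp_max_kernel), a shifted-exponential sum (fp_denom_kernel), and the normalisation (fp_output_kernel). Below is a minimal plain-CPU reference of the same computation; it is a sketch for reading purposes, not LBANN code, and the function name and layout assumption are mine.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

// x and y are laid out as [sample][channel][channel_size], matching the
// (mini-batch, num_channels, channel_size) view used by the kernels.
void channelwise_softmax_ref(const std::vector<float>& x, std::vector<float>& y,
                             std::size_t num_samples, std::size_t num_channels,
                             std::size_t channel_size) {
    y.resize(x.size());
    for (std::size_t s = 0; s < num_samples; ++s) {
        for (std::size_t c = 0; c < num_channels; ++c) {
            const std::size_t base = (s * num_channels + c) * channel_size;
            // shift = channel max (what the repeated fp_max_kernel passes compute)
            float shift = -std::numeric_limits<float>::infinity();
            for (std::size_t i = 0; i < channel_size; ++i)
                shift = std::max(shift, x[base + i]);
            // denom = sum of shifted exponentials (fp_denom_kernel)
            float denom = 0.f;
            for (std::size_t i = 0; i < channel_size; ++i)
                denom += std::exp(x[base + i] - shift);
            // y_i = exp(x_i - shift) / denom (fp_output_kernel)
            for (std::size_t i = 0; i < channel_size; ++i)
                y[base + i] = std::exp(x[base + i] - shift) / denom;
        }
    }
}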
9f25a17ff00e630944162632ee9f23aef88f7479.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CHANNELWISE_SOFTMAX_LAYER_INSTANTIATE #include "lbann/layers/misc/channelwise_softmax.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { using Size3 = gpu_lib::array<size_t,3>; /** @brief Max functor */ template <class T> struct max_op { __device__ __forceinline__ DataType operator()(const T& x1, const T& x2) const { return gpu_lib::max(x1, x2); } }; } // namespace <anon> // ========================================================= // Forward prop // ========================================================= namespace { /** @brief Max reduction over last dimension of 3D tensor. * * Each CUDA block computes the max over a subset of tensor entries * in @c vals and outputs the result to @c maxvals. This should be * repeated multiple times to fully reduce the last tensor dimension. 
* * Block dimensions: bdimx x 1 x 1 * * Grid dimensions: (vals_dims[2] / bdimx) x vals_dims[1] x vals_dims[0] * * maxvals: vals_dims[0] x vals_dims[1] x (vals_dims[2] / bdimx) */ template <typename TensorDataType, size_t bdimx> __global__ void fp_max_kernel( Size3 vals_dims, const TensorDataType* __restrict__ vals_buffer, Size3 vals_strides, TensorDataType* __restrict__ maxvals_buffer, Size3 maxvals_strides) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x; const size_t bidx = blockIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < vals_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < vals_dims[1]; j += nthreadsy) { // Find largest value for each thread TensorDataType maxval{-gpu_lib::infinity<TensorDataType>()}; for (size_t i = gidx; i < vals_dims[2]; i += nthreadsx) { const auto& val = vals_buffer[k * vals_strides[0] + j * vals_strides[1] + i * vals_strides[2]]; maxval = gpu_lib::max(maxval, val); } // Find largest value for each block maxval = gpu_lib::block_reduce<bdimx,bdimy,bdimz,TensorDataType,max_op<TensorDataType>>(maxval); if (tid == 0) { const auto& pos = (k * maxvals_strides[0] + j * maxvals_strides[1] + bidx * maxvals_strides[2]); maxvals_buffer[pos] = maxval; } } } } /** Compute softmax denominator. * * denom = sum( exp(x_i-shift) ) * * Block dimensions: bdimx x 1 x 1 * * Grid dimensions: (input_dims[2] / bdimx) x input_dims[1] x input_dims[0] * * shifts and denoms are fully-packed 2D tensors with dimensions of * input_dims[0] x input_dims[1]. */ template <typename TensorDataType, size_t bdimx> __global__ void fp_denom_kernel( Size3 input_dims, const TensorDataType* __restrict__ input_buffer, Size3 input_strides, const TensorDataType* __restrict__ shifts, TensorDataType* __restrict__ denoms) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < input_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < input_dims[1]; j += nthreadsy) { // Compute contribution from each thread const auto& shift = shifts[j + k*input_dims[1]]; TensorDataType denom{0.}; for (size_t i = gidx; i < input_dims[2]; i += nthreadsx) { const auto& x = input_buffer[k * input_strides[0] + j * input_strides[1] + i * input_strides[2]]; denom += gpu_lib::exp(x-shift); } // Compute contribution from each block denom = gpu_lib::block_reduce<bdimx,bdimy,bdimz>(denom); if (tid == 0) { gpu_lib::atomic_add(&denoms[j+k*input_dims[1]], denom); } } } } /** Compute softmax. * * y_i = exp(x_i-shift) / denom * * Block dimensions: bdimx x bdimy x bdimz * * Grid dimensions: (input_dims[2] / bdimx) x (input_dims[1] / bdimy) x (input_dims[0] / bdimz) * * shifts and denoms are fully-packed 2D tensors with dimensions of * input_dims[0] x input_dims[1]. 
*/ template <typename TensorDataType> __global__ void fp_output_kernel( Size3 input_dims, const TensorDataType* __restrict__ input_buffer, Size3 input_strides, TensorDataType* __restrict__ output_buffer, Size3 output_strides, const TensorDataType* __restrict__ shifts, const TensorDataType* __restrict__ denoms) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < input_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < input_dims[1]; j += nthreadsy) { const auto& shift = shifts[j + k*input_dims[1]]; const auto& denom = denoms[j + k*input_dims[1]]; for (size_t i = gidx; i < input_dims[2]; i += nthreadsx) { const auto& x = input_buffer[k * input_strides[0] + j * input_strides[1] + i * input_strides[2]]; auto& y = output_buffer[k * output_strides[0] + j * output_strides[1] + i * output_strides[2]]; y = gpu_lib::exp(x-shift) / denom; } } } } /** @brief Forward prop */ template <typename TensorDataType> void fp_impl(size_t num_channels, size_t channel_size, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& output) { // Local matrices using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>; const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix()); auto& local_output = dynamic_cast<LocalMat&>(output.Matrix()); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_input)); // Dimensions const size_t local_mini_batch_size = local_input.Width(); // const Size3 input_dims{local_mini_batch_size, num_channels, channel_size}; // Compute softmax shifts LocalMat local_shifts; if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); LocalMat maxvals(grid_dims.x * num_channels, local_mini_batch_size); hydrogen::gpu::LaunchKernel( fp_max_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_input.LockedBuffer(), Size3{static_cast<size_t>(local_input.LDim()), channel_size, 1}, maxvals.Buffer(), Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1}); while (grid_dims.x > 1) { const size_t prev_dim = grid_dims.x; grid_dims.x = (prev_dim + block_size - 1) / block_size; const LocalMat prev_maxvals(std::move(maxvals)); maxvals.Resize(grid_dims.x * num_channels, local_mini_batch_size); hydrogen::gpu::LaunchKernel( fp_max_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, prev_dim}, prev_maxvals.LockedBuffer(), Size3{static_cast<size_t>(prev_maxvals.LDim()), prev_dim, 1}, maxvals.Buffer(), Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1}); } local_shifts = std::move(maxvals); } // Compute softmax denominators LocalMat local_denoms(num_channels, local_mini_batch_size); El::Zero(local_denoms); if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; 
gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_denom_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_input.LockedBuffer(), Size3{static_cast<size_t>(local_input.LDim()), channel_size, 1}, local_shifts.LockedBuffer(), local_denoms.Buffer()); } // Compute softmax if (!local_input.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( fp_output_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_input.LockedBuffer(), Size3{static_cast<size_t>(local_input.LDim()), channel_size, 1}, local_output.Buffer(), Size3{static_cast<size_t>(local_output.LDim()), channel_size, 1}, local_shifts.LockedBuffer(), local_denoms.LockedBuffer()); } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_softmax_layer<TensorDataType,Layout,Device>::fp_compute() { const size_t num_channels = this->get_output_dims().front(); const size_t channel_size = this->get_output_size() / num_channels; fp_impl(num_channels, channel_size, this->get_prev_activations(), this->get_activations()); } // ========================================================= // Backprop // ========================================================= namespace { /** Compute dot product between output and gradient w.r.t. output. * * Block dimensions: bdimx x 1 x 1 * * Grid dimensions: (output_dims[2] / bdimx) x output_dims[1] x output_dims[0] * * y_dot_dy is a fully-packed 2D tensor with dimensions of * output_dims[0] x output_dims[1]. */ template <typename TensorDataType, size_t bdimx> __global__ void bp_y_dot_dy_kernel( Size3 output_dims, const TensorDataType* __restrict__ output_buffer, Size3 output_strides, const TensorDataType* __restrict__ output_grad_buffer, Size3 output_grad_strides, TensorDataType* __restrict__ y_dot_dy) { // Indices and dimensions constexpr size_t bdimy = 1; constexpr size_t bdimz = 1; const size_t tid = threadIdx.x; const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < output_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < output_dims[1]; j += nthreadsy) { // Compute contribution from each thread TensorDataType _y_dot_dy{0.}; for (size_t i = gidx; i < output_dims[2]; i += nthreadsx) { const auto& y = output_buffer[k * output_strides[0] + j * output_strides[1] + i * output_strides[2]]; const auto& dy = output_grad_buffer[k * output_grad_strides[0] + j * output_grad_strides[1] + i * output_grad_strides[2]]; _y_dot_dy += y * dy; } // Compute contribution from each block _y_dot_dy = gpu_lib::block_reduce<bdimx,bdimy,bdimz>(_y_dot_dy); if (tid == 0) { gpu_lib::atomic_add(&y_dot_dy[j+k*output_dims[1]], _y_dot_dy); } } } } /** Compute gradient w.r.t. input. 
* * dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) ) * * Block dimensions: bdimx x bdimy x bdimz * * Grid dimensions: (output_dims[2] / bdimx) x (output_dims[1] / bdimy) x (output_dims[0] / bdimz) * * y_dot_dy is a fully-packed 2D tensor with dimensions of * output_dims[0] x output_dims[1]. */ template <typename TensorDataType> __global__ void bp_input_grad_kernel( Size3 output_dims, const TensorDataType* __restrict__ output_buffer, Size3 output_strides, const TensorDataType* __restrict__ output_grad_buffer, Size3 output_grad_strides, TensorDataType* __restrict__ input_grad_buffer, Size3 input_grad_strides, const TensorDataType* __restrict__ y_dot_dy) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; const size_t nthreadsz = blockDim.z * gridDim.z; for (size_t k = gidz; k < output_dims[0]; k += nthreadsz) { for (size_t j = gidy; j < output_dims[1]; j += nthreadsy) { const auto& _y_dot_dy = y_dot_dy[j + k*output_dims[1]]; for (size_t i = gidx; i < output_dims[2]; i += nthreadsx) { const auto& y = output_buffer[k * output_strides[0] + j * output_strides[1] + i * output_strides[2]]; const auto& dy = output_grad_buffer[k * output_grad_strides[0] + j * output_grad_strides[1] + i * output_grad_strides[2]]; auto& dx = input_grad_buffer[k * input_grad_strides[0] + j * input_grad_strides[1] + i * input_grad_strides[2]]; dx = y * (dy - _y_dot_dy); } } } } /** @brief Backprop */ template <typename TensorDataType> void bp_impl(size_t num_channels, size_t channel_size, const El::AbstractDistMatrix<TensorDataType>& output, const El::AbstractDistMatrix<TensorDataType>& output_grad, El::AbstractDistMatrix<TensorDataType>& input_grad) { // Local matrices using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>; const auto& local_output = dynamic_cast<const LocalMat&>(output.LockedMatrix()); const auto& local_output_grad = dynamic_cast<const LocalMat&>(output_grad.LockedMatrix()); auto& local_input_grad = dynamic_cast<LocalMat&>(input_grad.Matrix()); // Dimensions const size_t local_mini_batch_size = local_output.Width(); // dot(y,dL/dy) LocalMat local_y_dot_dy(num_channels, local_mini_batch_size); El::Zero(local_y_dot_dy); auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_y_dot_dy), gpu::get_sync_info(local_output_grad), gpu::get_sync_info(local_output), gpu::get_sync_info(local_input_grad)); if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( bp_y_dot_dy_kernel<TensorDataType, block_size>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_output.LockedBuffer(), Size3{static_cast<size_t>(local_output.LDim()), channel_size, 1}, local_output_grad.LockedBuffer(), Size3{static_cast<size_t>(local_output_grad.LDim()), channel_size, 1}, local_y_dot_dy.Buffer()); } // Compute gradient w.r.t. 
input if (!local_output.IsEmpty()) { constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; grid_dims.z = local_mini_batch_size; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( bp_input_grad_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, Size3{local_mini_batch_size, num_channels, channel_size}, local_output.LockedBuffer(), Size3{static_cast<size_t>(local_output.LDim()), channel_size, 1}, local_output_grad.LockedBuffer(), Size3{static_cast<size_t>(local_output_grad.LDim()), channel_size, 1}, local_input_grad.Buffer(), Size3{static_cast<size_t>(local_input_grad.LDim()), channel_size, 1}, local_y_dot_dy.LockedBuffer()); } } } // namespace <anon> template <typename TensorDataType, data_layout Layout, El::Device Device> void channelwise_softmax_layer<TensorDataType,Layout,Device>::bp_compute() { const size_t num_channels = this->get_output_dims().front(); const size_t channel_size = this->get_output_size() / num_channels; bp_impl(num_channels, channel_size, this->get_activations(), this->get_prev_error_signals(), this->get_error_signals()); } // ========================================================= // Explicit template instantiation // ========================================================= #define PROTO(T) \ template class channelwise_softmax_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; #include "lbann/macros/instantiate.hpp" } // namespace lbann
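The backward path computes, per (sample, channel), the dot product dot(y, dL/dy) (bp_y_dot_dy_kernel) and then dL/dx_i = y_i * (dL/dy_i - dot(y, dL/dy)) (bp_input_grad_kernel). A matching CPU sketch of that formula follows; again this is not LBANN code, and the function name and layout are assumptions carried over from the forward sketch.

#include <cstddef>
#include <vector>

// Same [sample][channel][channel_size] layout as the forward sketch.
void channelwise_softmax_backward_ref(const std::vector<float>& y,
                                      const std::vector<float>& dy,
                                      std::vector<float>& dx,
                                      std::size_t num_samples, std::size_t num_channels,
                                      std::size_t channel_size) {
    dx.resize(y.size());
    for (std::size_t s = 0; s < num_samples; ++s) {
        for (std::size_t c = 0; c < num_channels; ++c) {
            const std::size_t base = (s * num_channels + c) * channel_size;
            float y_dot_dy = 0.f;                           // bp_y_dot_dy_kernel
            for (std::size_t i = 0; i < channel_size; ++i)
                y_dot_dy += y[base + i] * dy[base + i];
            for (std::size_t i = 0; i < channel_size; ++i)  // bp_input_grad_kernel
                dx[base + i] = y[base + i] * (dy[base + i] - y_dot_dy);
        }
    }
}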
ae221e7ddc5990ed04d2ff63f4e3fdb44fb30dcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <accelerate_cuda.h> extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const Int64 shIn3_2, const Int64 shIn3_1, const Int64 shIn3_0, const double* __restrict__ arrIn3_2, const double* __restrict__ arrIn3_1, const double* __restrict__ arrIn3_0, const double* __restrict__ arrIn4_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_3, double* __restrict__ arrOut_2, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0) { const int shapeSize = shOut_2 * (shOut_1 * shOut_0); const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 tmp_0 = ix; const Int64 tmp_1 = tmp_0 / shOut_0; const Int64 tmp_2 = tmp_1 / shOut_1; const Int64 sh2 = tmp_2 % shOut_2; const Int64 sh1 = tmp_1 % shOut_1; const Int64 sh0 = tmp_0 % shOut_0; const Int64 v0 = (Int64) 1; const Int64 v1 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0; const Int64 v2 = (sh2 * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0); const Int64 v3 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0); const Int64 v4 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0; const Int64 v5 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + sh0; const Int64 v6 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0); const Int64 v7 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0); const Int64 v8 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0; const double v9 = arrIn2_2[v1]; const double v10 = arrIn2_1[v1]; const double v11 = arrIn2_0[v1]; const double v12 = arrIn2_2[v2]; const double v13 = arrIn2_1[v2]; const double v14 = arrIn2_0[v2]; const double v15 = arrIn2_2[v3]; const double v16 = arrIn2_1[v3]; const double v17 = arrIn2_0[v3]; const double v18 = arrIn2_2[v4]; const double v19 = arrIn2_1[v4]; const double v20 = arrIn2_0[v4]; const double v21 = arrIn2_2[v5]; const double v22 = arrIn2_1[v5]; const double v23 = arrIn2_0[v5]; const double v24 = arrIn2_2[v6]; const double v25 = arrIn2_1[v6]; const double v26 = arrIn2_0[v6]; const double v27 = arrIn2_2[v7]; const double v28 = arrIn2_1[v7]; const double v29 = arrIn2_0[v7]; const double v30 = arrIn2_2[v8]; const double v31 = arrIn2_1[v8]; const double v32 = arrIn2_0[v8]; const double v78 = 8.333333333333333e-2 * (({ const double v33 = v15 - v9; const double v34 = v16 - v10; const double v35 = v17 - v11; const double v36 = v27 - v18; const double v37 = v28 - v19; const double v38 = v29 - v20; const double v39 = v18 - v12; const double v40 = v19 - v13; const double v41 = v20 - v14; const double v42 = v30 - v15; const double v43 = v31 - v16; const double v44 = v32 - v17; const double v45 = v39 + v42; const double v46 = v40 + v43; const double v47 = v41 + v44; ; v45 * (v37 * v35 - v38 * v34) + v46 * (v38 * v33 - v36 * v35) + v47 * (v36 * v34 - v37 * v33); }) + ({ const double v48 = v30 - v9; const double v49 = v31 - v10; const double v50 = v32 - v11; const double v51 = v27 - v21; const double v52 = v28 - v22; const double v53 = v29 - v23; const double v54 = v21 - v18; const double v55 = v22 - v19; const double v56 = v23 - v20; const double 
v57 = v24 - v30; const double v58 = v25 - v31; const double v59 = v26 - v32; const double v60 = v54 + v57; const double v61 = v55 + v58; const double v62 = v56 + v59; ; v60 * (v52 * v50 - v53 * v49) + v61 * (v53 * v48 - v51 * v50) + v62 * (v51 * v49 - v52 * v48); }) + ({ const double v63 = v24 - v9; const double v64 = v25 - v10; const double v65 = v26 - v11; const double v66 = v27 - v12; const double v67 = v28 - v13; const double v68 = v29 - v14; const double v69 = v12 - v21; const double v70 = v13 - v22; const double v71 = v14 - v23; const double v72 = v15 - v24; const double v73 = v16 - v25; const double v74 = v17 - v26; const double v75 = v69 + v72; const double v76 = v70 + v73; const double v77 = v71 + v74; ; v75 * (v67 * v65 - v68 * v64) + v76 * (v68 * v63 - v66 * v65) + v77 * (v66 * v64 - v67 * v63); })); const double v80 = v78 / ({ const Int64 v79 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v79]; }); const Int64 v82 = (sh2 * shIn3_1 + sh1) * shIn3_0 + sh0; const Int64 v83 = (sh2 * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0); const Int64 v84 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0); const Int64 v85 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0; const Int64 v86 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + sh0; const Int64 v87 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0); const Int64 v88 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0); const Int64 v89 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0; const double v90 = arrIn3_2[v82]; const double v91 = arrIn3_1[v82]; const double v92 = arrIn3_0[v82]; const double v93 = arrIn3_2[v83]; const double v94 = arrIn3_1[v83]; const double v95 = arrIn3_0[v83]; const double v96 = arrIn3_2[v84]; const double v97 = arrIn3_1[v84]; const double v98 = arrIn3_0[v84]; const double v99 = arrIn3_2[v85]; const double v100 = arrIn3_1[v85]; const double v101 = arrIn3_0[v85]; const double v102 = arrIn3_2[v86]; const double v103 = arrIn3_1[v86]; const double v104 = arrIn3_0[v86]; const double v105 = arrIn3_2[v87]; const double v106 = arrIn3_1[v87]; const double v107 = arrIn3_0[v87]; const double v108 = arrIn3_2[v88]; const double v109 = arrIn3_1[v88]; const double v110 = arrIn3_0[v88]; const double v111 = arrIn3_2[v89]; const double v112 = arrIn3_1[v89]; const double v113 = arrIn3_0[v89]; const double v115 = 0.5 * ({ const Int64 v114 = 0; ; arrIn4_0[v114]; }); const double v116 = v115 * v90; const double v117 = v115 * v91; const double v118 = v115 * v92; const double v119 = v115 * v93; const double v120 = v115 * v94; const double v121 = v115 * v95; const double v122 = v115 * v96; const double v123 = v115 * v97; const double v124 = v115 * v98; const double v125 = v115 * v99; const double v126 = v115 * v100; const double v127 = v115 * v101; const double v128 = v115 * v102; const double v129 = v115 * v103; const double v130 = v115 * v104; const double v131 = v115 * v105; const double v132 = v115 * v106; const double v133 = v115 * v107; const double v134 = v115 * v108; const double v135 = v115 * v109; const double v136 = v115 * v110; const double v137 = v115 * v111; const double v138 = v115 * v112; const double v139 = v115 * v113; const double v140 = v9 - v116; const double v141 = v10 - v117; const double v142 = v11 - v118; const double v143 = v12 - v119; const double v144 = v13 - v120; const double v145 = v14 - v121; const double v146 = v15 - v122; const double v147 = v16 - v123; const double v148 = v17 - v124; const double v149 = v18 - v125; const double v150 = v19 - v126; const double v151 = v20 - v127; const double v152 
= v21 - v128; const double v153 = v22 - v129; const double v154 = v23 - v130; const double v155 = v24 - v131; const double v156 = v25 - v132; const double v157 = v26 - v133; const double v158 = v27 - v134; const double v159 = v28 - v135; const double v160 = v29 - v136; const double v161 = v30 - v137; const double v162 = v31 - v138; const double v163 = v32 - v139; const double v164 = v152 - v146; const double v165 = v153 - v147; const double v166 = v154 - v148; const double v167 = v155 - v149; const double v168 = v156 - v150; const double v169 = v157 - v151; const double v170 = v158 - v140; const double v171 = v159 - v141; const double v172 = v160 - v142; const double v173 = v161 - v143; const double v174 = v162 - v144; const double v175 = v163 - v145; const double v176 = 0.125 * (v170 + v167 - v173 - v164); const double v177 = 0.125 * (v170 + v167 + v173 + v164); const double v178 = 0.125 * (v172 + v169 - v175 - v166); const double v179 = 0.125 * (v171 + v168 + v174 + v165); const double v180 = 0.125 * (v171 + v168 - v174 - v165); const double v181 = 0.125 * (v172 + v169 + v175 + v166); const double v182 = v179 * v178 - v181 * v180; const double v183 = v181 * v176 - v177 * v178; const double v184 = v177 * v180 - v179 * v176; const double v185 = 0.125 * (v170 + v167 + v173 + v164); const double v186 = 0.125 * (v170 - v167 + v173 - v164); const double v187 = 0.125 * (v172 + v169 + v175 + v166); const double v188 = 0.125 * (v171 - v168 + v174 - v165); const double v189 = 0.125 * (v171 + v168 + v174 + v165); const double v190 = 0.125 * (v172 - v169 + v175 - v166); const double v191 = v188 * v187 - v190 * v189; const double v192 = v190 * v185 - v186 * v187; const double v193 = v186 * v189 - v188 * v185; const double v194 = 0.125 * (v170 - v167 + v173 - v164); const double v195 = 0.125 * (v170 + v167 - v173 - v164); const double v196 = 0.125 * (v172 - v169 + v175 - v166); const double v197 = 0.125 * (v171 + v168 - v174 - v165); const double v198 = 0.125 * (v171 - v168 + v174 - v165); const double v199 = 0.125 * (v172 + v169 - v175 - v166); const double v200 = v197 * v196 - v199 * v198; const double v201 = v199 * v194 - v195 * v196; const double v202 = v195 * v198 - v197 * v194; const double v203 = -v191 - v182 - v200; const double v204 = -v192 - v183 - v201; const double v205 = -v193 - v184 - v202; const double v206 = v191 - v182 - v200; const double v207 = v192 - v183 - v201; const double v208 = v193 - v184 - v202; const double v209 = v191 + v182 - v200; const double v210 = v192 + v183 - v201; const double v211 = v193 + v184 - v202; const double v212 = -v191 + v182 - v200; const double v213 = -v192 + v183 - v201; const double v214 = -v193 + v184 - v202; const double v215 = -(v191 + v182 - v200); const double v216 = -(v192 + v183 - v201); const double v217 = -(v193 + v184 - v202); const double v218 = -(-v191 + v182 - v200); const double v219 = -(-v192 + v183 - v201); const double v220 = -(-v193 + v184 - v202); const double v221 = -(-v191 - v182 - v200); const double v222 = -(-v192 - v183 - v201); const double v223 = -(-v193 - v184 - v202); const double v224 = -(v191 - v182 - v200); const double v225 = -(v192 - v183 - v201); const double v226 = -(v193 - v184 - v202); const double v227 = 8.0 * (0.125 * (v170 - v167 + v173 - v164) * v182 + 0.125 * (v171 - v168 + v174 - v165) * v183 + 0.125 * (v172 - v169 + v175 - v166) * v184); const double v228 = 1.0 / v227; const double v229 = v99 - v105; const double v230 = v100 - v106; const double v231 = v101 - v107; const double v232 = v93 - v111; const 
double v233 = v94 - v112; const double v234 = v95 - v113; const double v235 = v90 - v108; const double v236 = v91 - v109; const double v237 = v92 - v110; const double v238 = v96 - v102; const double v239 = v97 - v103; const double v240 = v98 - v104; const double v241 = v203 * v235 + v206 * v232 + v209 * v238 + v212 * v229; const double v242 = v203 * v236 + v206 * v233 + v209 * v239 + v212 * v230; const double v243 = v203 * v237 + v206 * v234 + v209 * v240 + v212 * v231; const double v244 = v204 * v235 + v207 * v232 + v210 * v238 + v213 * v229; const double v245 = v204 * v236 + v207 * v233 + v210 * v239 + v213 * v230; const double v246 = v204 * v237 + v207 * v234 + v210 * v240 + v213 * v231; const double v247 = v205 * v235 + v208 * v232 + v211 * v238 + v214 * v229; const double v248 = v205 * v236 + v208 * v233 + v211 * v239 + v214 * v230; const double v249 = v205 * v237 + v208 * v234 + v211 * v240 + v214 * v231; const double v250 = v228 * v241; const double v251 = v228 * v242; const double v252 = v228 * v243; const double v253 = v228 * v244; const double v254 = v228 * v245; const double v255 = v228 * v246; const double v256 = v228 * v247; const double v257 = v228 * v248; const double v258 = v228 * v249; arrOut_3[ix] = v80; arrOut_2[ix] = v80 - ({ const Int64 v81 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0; ; arrIn1_0[v81]; }); arrOut_1[ix] = v250 + v254 + v258; arrOut_0[ix] = 4.0 * v78 / sqrt(fmax(fmax(fmax(fmax(fmax(({ const double v259 = v15 - v9; const double v260 = v16 - v10; const double v261 = v17 - v11; const double v262 = v18 - v12; const double v263 = v19 - v13; const double v264 = v20 - v14; ; ((v259 - v262) * (v259 - v262) + (v260 - v263) * (v260 - v263) + (v261 - v264) * (v261 - v264)) * ((v259 + v262) * (v259 + v262) + (v260 + v263) * (v260 + v263) + (v261 + v264) * (v261 + v264)) - ({ const double v265 = (v259 - v262) * (v259 + v262) + (v260 - v263) * (v260 + v263) + (v261 - v264) * (v261 + v264); ; v265 * v265; }); }), ({ const double v266 = v24 - v9; const double v267 = v25 - v10; const double v268 = v26 - v11; const double v269 = v12 - v21; const double v270 = v13 - v22; const double v271 = v14 - v23; ; ((v266 - v269) * (v266 - v269) + (v267 - v270) * (v267 - v270) + (v268 - v271) * (v268 - v271)) * ((v266 + v269) * (v266 + v269) + (v267 + v270) * (v267 + v270) + (v268 + v271) * (v268 + v271)) - ({ const double v272 = (v266 - v269) * (v266 + v269) + (v267 - v270) * (v267 + v270) + (v268 - v271) * (v268 + v271); ; v272 * v272; }); })), ({ const double v273 = v27 - v12; const double v274 = v28 - v13; const double v275 = v29 - v14; const double v276 = v15 - v24; const double v277 = v16 - v25; const double v278 = v17 - v26; ; ((v273 - v276) * (v273 - v276) + (v274 - v277) * (v274 - v277) + (v275 - v278) * (v275 - v278)) * ((v273 + v276) * (v273 + v276) + (v274 + v277) * (v274 + v277) + (v275 + v278) * (v275 + v278)) - ({ const double v279 = (v273 - v276) * (v273 + v276) + (v274 - v277) * (v274 + v277) + (v275 - v278) * (v275 + v278); ; v279 * v279; }); })), ({ const double v280 = v30 - v15; const double v281 = v31 - v16; const double v282 = v32 - v17; const double v283 = v18 - v27; const double v284 = v19 - v28; const double v285 = v20 - v29; ; ((v280 - v283) * (v280 - v283) + (v281 - v284) * (v281 - v284) + (v282 - v285) * (v282 - v285)) * ((v280 + v283) * (v280 + v283) + (v281 + v284) * (v281 + v284) + (v282 + v285) * (v282 + v285)) - ({ const double v286 = (v280 - v283) * (v280 + v283) + (v281 - v284) * (v281 + v284) + (v282 - v285) * (v282 + v285); ; v286 * v286; }); })), ({ 
const double v287 = v21 - v18; const double v288 = v22 - v19; const double v289 = v23 - v20; const double v290 = v9 - v30; const double v291 = v10 - v31; const double v292 = v11 - v32; ; ((v287 - v290) * (v287 - v290) + (v288 - v291) * (v288 - v291) + (v289 - v292) * (v289 - v292)) * ((v287 + v290) * (v287 + v290) + (v288 + v291) * (v288 + v291) + (v289 + v292) * (v289 + v292)) - ({ const double v293 = (v287 - v290) * (v287 + v290) + (v288 - v291) * (v288 + v291) + (v289 - v292) * (v289 + v292); ; v293 * v293; }); })), ({ const double v294 = v27 - v21; const double v295 = v28 - v22; const double v296 = v29 - v23; const double v297 = v24 - v30; const double v298 = v25 - v31; const double v299 = v26 - v32; ; ((v294 - v297) * (v294 - v297) + (v295 - v298) * (v295 - v298) + (v296 - v299) * (v296 - v299)) * ((v294 + v297) * (v294 + v297) + (v295 + v298) * (v295 + v298) + (v296 + v299) * (v296 + v299)) - ({ const double v300 = (v294 - v297) * (v294 + v297) + (v295 - v298) * (v295 + v298) + (v296 - v299) * (v296 + v299); ; v300 * v300; }); }))); } }
ae221e7ddc5990ed04d2ff63f4e3fdb44fb30dcc.cu
#include <accelerate_cuda.h> extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const Int64 shIn3_2, const Int64 shIn3_1, const Int64 shIn3_0, const double* __restrict__ arrIn3_2, const double* __restrict__ arrIn3_1, const double* __restrict__ arrIn3_0, const double* __restrict__ arrIn4_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_3, double* __restrict__ arrOut_2, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0) { const int shapeSize = shOut_2 * (shOut_1 * shOut_0); const int gridSize = blockDim.x * gridDim.x; int ix; for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 tmp_0 = ix; const Int64 tmp_1 = tmp_0 / shOut_0; const Int64 tmp_2 = tmp_1 / shOut_1; const Int64 sh2 = tmp_2 % shOut_2; const Int64 sh1 = tmp_1 % shOut_1; const Int64 sh0 = tmp_0 % shOut_0; const Int64 v0 = (Int64) 1; const Int64 v1 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0; const Int64 v2 = (sh2 * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0); const Int64 v3 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0); const Int64 v4 = (sh2 * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0; const Int64 v5 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + sh0; const Int64 v6 = ((v0 + sh2) * shIn2_1 + sh1) * shIn2_0 + (v0 + sh0); const Int64 v7 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + (v0 + sh0); const Int64 v8 = ((v0 + sh2) * shIn2_1 + (v0 + sh1)) * shIn2_0 + sh0; const double v9 = arrIn2_2[v1]; const double v10 = arrIn2_1[v1]; const double v11 = arrIn2_0[v1]; const double v12 = arrIn2_2[v2]; const double v13 = arrIn2_1[v2]; const double v14 = arrIn2_0[v2]; const double v15 = arrIn2_2[v3]; const double v16 = arrIn2_1[v3]; const double v17 = arrIn2_0[v3]; const double v18 = arrIn2_2[v4]; const double v19 = arrIn2_1[v4]; const double v20 = arrIn2_0[v4]; const double v21 = arrIn2_2[v5]; const double v22 = arrIn2_1[v5]; const double v23 = arrIn2_0[v5]; const double v24 = arrIn2_2[v6]; const double v25 = arrIn2_1[v6]; const double v26 = arrIn2_0[v6]; const double v27 = arrIn2_2[v7]; const double v28 = arrIn2_1[v7]; const double v29 = arrIn2_0[v7]; const double v30 = arrIn2_2[v8]; const double v31 = arrIn2_1[v8]; const double v32 = arrIn2_0[v8]; const double v78 = 8.333333333333333e-2 * (({ const double v33 = v15 - v9; const double v34 = v16 - v10; const double v35 = v17 - v11; const double v36 = v27 - v18; const double v37 = v28 - v19; const double v38 = v29 - v20; const double v39 = v18 - v12; const double v40 = v19 - v13; const double v41 = v20 - v14; const double v42 = v30 - v15; const double v43 = v31 - v16; const double v44 = v32 - v17; const double v45 = v39 + v42; const double v46 = v40 + v43; const double v47 = v41 + v44; ; v45 * (v37 * v35 - v38 * v34) + v46 * (v38 * v33 - v36 * v35) + v47 * (v36 * v34 - v37 * v33); }) + ({ const double v48 = v30 - v9; const double v49 = v31 - v10; const double v50 = v32 - v11; const double v51 = v27 - v21; const double v52 = v28 - v22; const double v53 = v29 - v23; const double v54 = v21 - v18; const double v55 = v22 - v19; const double v56 = v23 - v20; const double v57 = v24 - v30; const double v58 = v25 - v31; const double v59 = v26 - v32; const double 
v60 = v54 + v57; const double v61 = v55 + v58; const double v62 = v56 + v59; ; v60 * (v52 * v50 - v53 * v49) + v61 * (v53 * v48 - v51 * v50) + v62 * (v51 * v49 - v52 * v48); }) + ({ const double v63 = v24 - v9; const double v64 = v25 - v10; const double v65 = v26 - v11; const double v66 = v27 - v12; const double v67 = v28 - v13; const double v68 = v29 - v14; const double v69 = v12 - v21; const double v70 = v13 - v22; const double v71 = v14 - v23; const double v72 = v15 - v24; const double v73 = v16 - v25; const double v74 = v17 - v26; const double v75 = v69 + v72; const double v76 = v70 + v73; const double v77 = v71 + v74; ; v75 * (v67 * v65 - v68 * v64) + v76 * (v68 * v63 - v66 * v65) + v77 * (v66 * v64 - v67 * v63); })); const double v80 = v78 / ({ const Int64 v79 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v79]; }); const Int64 v82 = (sh2 * shIn3_1 + sh1) * shIn3_0 + sh0; const Int64 v83 = (sh2 * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0); const Int64 v84 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0); const Int64 v85 = (sh2 * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0; const Int64 v86 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + sh0; const Int64 v87 = ((v0 + sh2) * shIn3_1 + sh1) * shIn3_0 + (v0 + sh0); const Int64 v88 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + (v0 + sh0); const Int64 v89 = ((v0 + sh2) * shIn3_1 + (v0 + sh1)) * shIn3_0 + sh0; const double v90 = arrIn3_2[v82]; const double v91 = arrIn3_1[v82]; const double v92 = arrIn3_0[v82]; const double v93 = arrIn3_2[v83]; const double v94 = arrIn3_1[v83]; const double v95 = arrIn3_0[v83]; const double v96 = arrIn3_2[v84]; const double v97 = arrIn3_1[v84]; const double v98 = arrIn3_0[v84]; const double v99 = arrIn3_2[v85]; const double v100 = arrIn3_1[v85]; const double v101 = arrIn3_0[v85]; const double v102 = arrIn3_2[v86]; const double v103 = arrIn3_1[v86]; const double v104 = arrIn3_0[v86]; const double v105 = arrIn3_2[v87]; const double v106 = arrIn3_1[v87]; const double v107 = arrIn3_0[v87]; const double v108 = arrIn3_2[v88]; const double v109 = arrIn3_1[v88]; const double v110 = arrIn3_0[v88]; const double v111 = arrIn3_2[v89]; const double v112 = arrIn3_1[v89]; const double v113 = arrIn3_0[v89]; const double v115 = 0.5 * ({ const Int64 v114 = 0; ; arrIn4_0[v114]; }); const double v116 = v115 * v90; const double v117 = v115 * v91; const double v118 = v115 * v92; const double v119 = v115 * v93; const double v120 = v115 * v94; const double v121 = v115 * v95; const double v122 = v115 * v96; const double v123 = v115 * v97; const double v124 = v115 * v98; const double v125 = v115 * v99; const double v126 = v115 * v100; const double v127 = v115 * v101; const double v128 = v115 * v102; const double v129 = v115 * v103; const double v130 = v115 * v104; const double v131 = v115 * v105; const double v132 = v115 * v106; const double v133 = v115 * v107; const double v134 = v115 * v108; const double v135 = v115 * v109; const double v136 = v115 * v110; const double v137 = v115 * v111; const double v138 = v115 * v112; const double v139 = v115 * v113; const double v140 = v9 - v116; const double v141 = v10 - v117; const double v142 = v11 - v118; const double v143 = v12 - v119; const double v144 = v13 - v120; const double v145 = v14 - v121; const double v146 = v15 - v122; const double v147 = v16 - v123; const double v148 = v17 - v124; const double v149 = v18 - v125; const double v150 = v19 - v126; const double v151 = v20 - v127; const double v152 = v21 - v128; const double v153 = v22 - v129; const double v154 = v23 - v130; const 
double v155 = v24 - v131; const double v156 = v25 - v132; const double v157 = v26 - v133; const double v158 = v27 - v134; const double v159 = v28 - v135; const double v160 = v29 - v136; const double v161 = v30 - v137; const double v162 = v31 - v138; const double v163 = v32 - v139; const double v164 = v152 - v146; const double v165 = v153 - v147; const double v166 = v154 - v148; const double v167 = v155 - v149; const double v168 = v156 - v150; const double v169 = v157 - v151; const double v170 = v158 - v140; const double v171 = v159 - v141; const double v172 = v160 - v142; const double v173 = v161 - v143; const double v174 = v162 - v144; const double v175 = v163 - v145; const double v176 = 0.125 * (v170 + v167 - v173 - v164); const double v177 = 0.125 * (v170 + v167 + v173 + v164); const double v178 = 0.125 * (v172 + v169 - v175 - v166); const double v179 = 0.125 * (v171 + v168 + v174 + v165); const double v180 = 0.125 * (v171 + v168 - v174 - v165); const double v181 = 0.125 * (v172 + v169 + v175 + v166); const double v182 = v179 * v178 - v181 * v180; const double v183 = v181 * v176 - v177 * v178; const double v184 = v177 * v180 - v179 * v176; const double v185 = 0.125 * (v170 + v167 + v173 + v164); const double v186 = 0.125 * (v170 - v167 + v173 - v164); const double v187 = 0.125 * (v172 + v169 + v175 + v166); const double v188 = 0.125 * (v171 - v168 + v174 - v165); const double v189 = 0.125 * (v171 + v168 + v174 + v165); const double v190 = 0.125 * (v172 - v169 + v175 - v166); const double v191 = v188 * v187 - v190 * v189; const double v192 = v190 * v185 - v186 * v187; const double v193 = v186 * v189 - v188 * v185; const double v194 = 0.125 * (v170 - v167 + v173 - v164); const double v195 = 0.125 * (v170 + v167 - v173 - v164); const double v196 = 0.125 * (v172 - v169 + v175 - v166); const double v197 = 0.125 * (v171 + v168 - v174 - v165); const double v198 = 0.125 * (v171 - v168 + v174 - v165); const double v199 = 0.125 * (v172 + v169 - v175 - v166); const double v200 = v197 * v196 - v199 * v198; const double v201 = v199 * v194 - v195 * v196; const double v202 = v195 * v198 - v197 * v194; const double v203 = -v191 - v182 - v200; const double v204 = -v192 - v183 - v201; const double v205 = -v193 - v184 - v202; const double v206 = v191 - v182 - v200; const double v207 = v192 - v183 - v201; const double v208 = v193 - v184 - v202; const double v209 = v191 + v182 - v200; const double v210 = v192 + v183 - v201; const double v211 = v193 + v184 - v202; const double v212 = -v191 + v182 - v200; const double v213 = -v192 + v183 - v201; const double v214 = -v193 + v184 - v202; const double v215 = -(v191 + v182 - v200); const double v216 = -(v192 + v183 - v201); const double v217 = -(v193 + v184 - v202); const double v218 = -(-v191 + v182 - v200); const double v219 = -(-v192 + v183 - v201); const double v220 = -(-v193 + v184 - v202); const double v221 = -(-v191 - v182 - v200); const double v222 = -(-v192 - v183 - v201); const double v223 = -(-v193 - v184 - v202); const double v224 = -(v191 - v182 - v200); const double v225 = -(v192 - v183 - v201); const double v226 = -(v193 - v184 - v202); const double v227 = 8.0 * (0.125 * (v170 - v167 + v173 - v164) * v182 + 0.125 * (v171 - v168 + v174 - v165) * v183 + 0.125 * (v172 - v169 + v175 - v166) * v184); const double v228 = 1.0 / v227; const double v229 = v99 - v105; const double v230 = v100 - v106; const double v231 = v101 - v107; const double v232 = v93 - v111; const double v233 = v94 - v112; const double v234 = v95 - v113; const double v235 = v90 - v108; 
const double v236 = v91 - v109; const double v237 = v92 - v110; const double v238 = v96 - v102; const double v239 = v97 - v103; const double v240 = v98 - v104; const double v241 = v203 * v235 + v206 * v232 + v209 * v238 + v212 * v229; const double v242 = v203 * v236 + v206 * v233 + v209 * v239 + v212 * v230; const double v243 = v203 * v237 + v206 * v234 + v209 * v240 + v212 * v231; const double v244 = v204 * v235 + v207 * v232 + v210 * v238 + v213 * v229; const double v245 = v204 * v236 + v207 * v233 + v210 * v239 + v213 * v230; const double v246 = v204 * v237 + v207 * v234 + v210 * v240 + v213 * v231; const double v247 = v205 * v235 + v208 * v232 + v211 * v238 + v214 * v229; const double v248 = v205 * v236 + v208 * v233 + v211 * v239 + v214 * v230; const double v249 = v205 * v237 + v208 * v234 + v211 * v240 + v214 * v231; const double v250 = v228 * v241; const double v251 = v228 * v242; const double v252 = v228 * v243; const double v253 = v228 * v244; const double v254 = v228 * v245; const double v255 = v228 * v246; const double v256 = v228 * v247; const double v257 = v228 * v248; const double v258 = v228 * v249; arrOut_3[ix] = v80; arrOut_2[ix] = v80 - ({ const Int64 v81 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0; ; arrIn1_0[v81]; }); arrOut_1[ix] = v250 + v254 + v258; arrOut_0[ix] = 4.0 * v78 / sqrt(fmax(fmax(fmax(fmax(fmax(({ const double v259 = v15 - v9; const double v260 = v16 - v10; const double v261 = v17 - v11; const double v262 = v18 - v12; const double v263 = v19 - v13; const double v264 = v20 - v14; ; ((v259 - v262) * (v259 - v262) + (v260 - v263) * (v260 - v263) + (v261 - v264) * (v261 - v264)) * ((v259 + v262) * (v259 + v262) + (v260 + v263) * (v260 + v263) + (v261 + v264) * (v261 + v264)) - ({ const double v265 = (v259 - v262) * (v259 + v262) + (v260 - v263) * (v260 + v263) + (v261 - v264) * (v261 + v264); ; v265 * v265; }); }), ({ const double v266 = v24 - v9; const double v267 = v25 - v10; const double v268 = v26 - v11; const double v269 = v12 - v21; const double v270 = v13 - v22; const double v271 = v14 - v23; ; ((v266 - v269) * (v266 - v269) + (v267 - v270) * (v267 - v270) + (v268 - v271) * (v268 - v271)) * ((v266 + v269) * (v266 + v269) + (v267 + v270) * (v267 + v270) + (v268 + v271) * (v268 + v271)) - ({ const double v272 = (v266 - v269) * (v266 + v269) + (v267 - v270) * (v267 + v270) + (v268 - v271) * (v268 + v271); ; v272 * v272; }); })), ({ const double v273 = v27 - v12; const double v274 = v28 - v13; const double v275 = v29 - v14; const double v276 = v15 - v24; const double v277 = v16 - v25; const double v278 = v17 - v26; ; ((v273 - v276) * (v273 - v276) + (v274 - v277) * (v274 - v277) + (v275 - v278) * (v275 - v278)) * ((v273 + v276) * (v273 + v276) + (v274 + v277) * (v274 + v277) + (v275 + v278) * (v275 + v278)) - ({ const double v279 = (v273 - v276) * (v273 + v276) + (v274 - v277) * (v274 + v277) + (v275 - v278) * (v275 + v278); ; v279 * v279; }); })), ({ const double v280 = v30 - v15; const double v281 = v31 - v16; const double v282 = v32 - v17; const double v283 = v18 - v27; const double v284 = v19 - v28; const double v285 = v20 - v29; ; ((v280 - v283) * (v280 - v283) + (v281 - v284) * (v281 - v284) + (v282 - v285) * (v282 - v285)) * ((v280 + v283) * (v280 + v283) + (v281 + v284) * (v281 + v284) + (v282 + v285) * (v282 + v285)) - ({ const double v286 = (v280 - v283) * (v280 + v283) + (v281 - v284) * (v281 + v284) + (v282 - v285) * (v282 + v285); ; v286 * v286; }); })), ({ const double v287 = v21 - v18; const double v288 = v22 - v19; const double v289 = v23 - v20; 
const double v290 = v9 - v30; const double v291 = v10 - v31; const double v292 = v11 - v32; ; ((v287 - v290) * (v287 - v290) + (v288 - v291) * (v288 - v291) + (v289 - v292) * (v289 - v292)) * ((v287 + v290) * (v287 + v290) + (v288 + v291) * (v288 + v291) + (v289 + v292) * (v289 + v292)) - ({ const double v293 = (v287 - v290) * (v287 + v290) + (v288 - v291) * (v288 + v291) + (v289 - v292) * (v289 + v292); ; v293 * v293; }); })), ({ const double v294 = v27 - v21; const double v295 = v28 - v22; const double v296 = v29 - v23; const double v297 = v24 - v30; const double v298 = v25 - v31; const double v299 = v26 - v32; ; ((v294 - v297) * (v294 - v297) + (v295 - v298) * (v295 - v298) + (v296 - v299) * (v296 - v299)) * ((v294 + v297) * (v294 + v297) + (v295 + v298) * (v295 + v298) + (v296 + v299) * (v296 + v299)) - ({ const double v300 = (v294 - v297) * (v294 + v297) + (v295 - v298) * (v295 + v298) + (v296 - v299) * (v296 + v299); ; v300 * v300; }); }))); } }
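The generated kernels in the pair above lean on two idioms that are easy to miss in the flattened source: a grid-stride loop over the flattened output shape, and recovery of the 3-D index (sh2, sh1, sh0) from the flat index ix. The following standalone sketch isolates just those idioms; the kernel name, the use of long long as a stand-in for Accelerate's Int64, and the output buffer are illustrative assumptions, not part of the generated code.

// Sketch only: the grid-stride loop and flat-index decomposition used by the
// generate() kernels above, with illustrative names.
__global__ void gridStrideDecode(const long long shOut_2,
                                 const long long shOut_1,
                                 const long long shOut_0,
                                 long long* __restrict__ out)
{
    const long long shapeSize = shOut_2 * (shOut_1 * shOut_0);
    const long long gridSize  = (long long)blockDim.x * gridDim.x;

    for (long long ix = (long long)blockDim.x * blockIdx.x + threadIdx.x;
         ix < shapeSize; ix += gridSize)
    {
        // Same decomposition as generate(): the innermost dimension varies fastest.
        const long long sh0 = ix % shOut_0;
        const long long sh1 = (ix / shOut_0) % shOut_1;
        const long long sh2 = ((ix / shOut_0) / shOut_1) % shOut_2;

        // Record the decoded coordinates so the mapping can be inspected host-side.
        out[3 * ix + 0] = sh2;
        out[3 * ix + 1] = sh1;
        out[3 * ix + 2] = sh0;
    }
}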
bf9213c33fba7273c4cbf0144661845be3394912.hip
// !!! This is a file automatically generated by hipify!!! /* * cuTS: Scaling Subgraph Isomorphism on Distributed Multi-GPU Systems Using * Trie Based Data Structure * * Copyright (C) 2021 APPL Laboratories ([email protected]) * * This software is available under the MIT license, a copy of which can be * found in the file 'LICENSE' in the top-level directory. * * For further information contact: * (1) Lizhi Xiang ([email protected]) * (2) Aravind Sukumaran-Rajam ([email protected]) * * The citation information is provided in the 'README' in the top-level * directory. */ /* * For processing large data graphs, set the third argument first depth trunks to some unsigned integer number * (recommended 2-8). * The trunks is helpful to reduce GPU memory expansion for the intermediate results produced during matching. * For small data graphs, there is no need to set the third argument. */ #include "./inc/host_funcs.h" int main(int argc, char *argv[]){ hipSetDevice(0); if (argc < 3) { cout<<"args data_graph path,query_graph_path,first depth trunks(optional)"<<endl; exit(-1); } std::string query_graph_file = argv[2]; std::string data_graph_file = argv[1]; bool write_to_disk = false; if(argc == 3){ unsigned long long int result_len = search(query_graph_file,data_graph_file,write_to_disk); }else{ unsigned int trunks; try { trunks = atoi(argv[3]); }catch(int e){ cout<<"invalid trunks, set trunks = 4"<<endl; trunks = 4; } unsigned long long int result_len = search_dfs_bfs_strategy(query_graph_file,data_graph_file, write_to_disk,trunks); } return 0; }
bf9213c33fba7273c4cbf0144661845be3394912.cu
/* * cuTS: Scaling Subgraph Isomorphism on Distributed Multi-GPU Systems Using * Trie Based Data Structure * * Copyright (C) 2021 APPL Laboratories ([email protected]) * * This software is available under the MIT license, a copy of which can be * found in the file 'LICENSE' in the top-level directory. * * For further information contact: * (1) Lizhi Xiang ([email protected]) * (2) Aravind Sukumaran-Rajam ([email protected]) * * The citation information is provided in the 'README' in the top-level * directory. */ /* * For processing large data graphs, set the third argument first depth trunks to some unsigned integer number * (recommended 2-8). * The trunks is helpful to reduce GPU memory expansion for the intermediate results produced during matching. * For small data graphs, there is no need to set the third argument. */ #include "./inc/host_funcs.h" int main(int argc, char *argv[]){ cudaSetDevice(0); if (argc < 3) { cout<<"args data_graph path,query_graph_path,first depth trunks(optional)"<<endl; exit(-1); } std::string query_graph_file = argv[2]; std::string data_graph_file = argv[1]; bool write_to_disk = false; if(argc == 3){ unsigned long long int result_len = search(query_graph_file,data_graph_file,write_to_disk); }else{ unsigned int trunks; try { trunks = atoi(argv[3]); }catch(int e){ cout<<"invalid trunks, set trunks = 4"<<endl; trunks = 4; } unsigned long long int result_len = search_dfs_bfs_strategy(query_graph_file,data_graph_file, write_to_disk,trunks); } return 0; }
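One detail worth flagging in the driver above: atoi never throws, so the try/catch around atoi(argv[3]) cannot reach the "invalid trunks" fallback, and a malformed argument silently becomes 0. Below is a minimal sketch of stricter parsing under the same CLI convention (data graph, query graph, optional trunks); the helper name and the sanity cap of 64 are illustrative assumptions, not the project's code.

#include <cstdlib>
#include <iostream>

// Sketch only: parse the optional "trunks" argument with explicit error
// detection instead of atoi(), falling back to the same default of 4.
static unsigned int parse_trunks(const char* arg, unsigned int fallback = 4) {
    char* end = nullptr;
    unsigned long v = std::strtoul(arg, &end, 10);
    if (end == arg || *end != '\0' || v == 0 || v > 64) {  // 64: arbitrary sanity cap
        std::cout << "invalid trunks, set trunks = " << fallback << std::endl;
        return fallback;
    }
    return static_cast<unsigned int>(v);
}

// Inside main(), mirroring the original control flow:
//   unsigned int trunks = parse_trunks(argv[3]);
//   search_dfs_bfs_strategy(query_graph_file, data_graph_file, write_to_disk, trunks);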
b18ab4be9896b5b17201000d7af1561ce3a7f17f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/index_put_grad_kernel.h" #include <numeric> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/cast_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/index_put_utils.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename T, size_t Rank> __global__ void set_zero_cuda_kernel(const int64_t N, int64_t** indices, phi::Array<int64_t, Rank> stride, phi::Array<int64_t, Rank> shape, T* out) { int64_t idx = threadIdx.x + blockDim.x * blockIdx.x; int64_t cur_ix = 0; if (idx >= N) { return; } int64_t offset = 0; for (int i = 0; i < Rank; ++i) { cur_ix = (static_cast<int64_t>(*(indices[i] + idx))); if (cur_ix < 0) { cur_ix += shape[i]; } offset += stride[i] * cur_ix; } *(out + offset) = 0; } template <typename T, size_t Rank> __global__ void index_put_grad_cuda_kernel(const int64_t N, const T* out_grad, int64_t** indices, phi::Array<int64_t, Rank> stride, phi::Array<int64_t, Rank> shape, T* value_grad) { int64_t idx = threadIdx.x + blockDim.x * blockIdx.x; int64_t cur_ix = 0; if (idx >= N) { return; } int64_t offset = 0; for (int i = 0; i < Rank; ++i) { cur_ix = (static_cast<int64_t>(*(indices[i] + idx))); if (cur_ix < 0) { cur_ix += shape[i]; } offset += stride[i] * cur_ix; } *(value_grad + idx) = *(out_grad + offset); } template <typename T, typename Context, size_t Rank> void LaunchIndexPutGradCudaKernel( const Context& dev_ctx, const std::vector<const DenseTensor*>& indices, const DenseTensor& out_grad, bool accumulate, DenseTensor* value_grad, DenseTensor* x_grad) { if (x_grad) { phi::Copy(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad); if (!accumulate) { T* x_grad_data = x_grad->data<T>(); auto x_grad_dims = x_grad->dims(); const int64_t numel = indices[0]->numel(); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel); auto x_grad_stride = phi::stride(x_grad_dims); phi::Array<int64_t, Rank> stride_a; phi::Array<int64_t, Rank> shape_a; for (size_t idx = 0; idx < Rank; ++idx) { stride_a[idx] = x_grad_stride[idx]; shape_a[idx] = x_grad_dims[idx]; } auto pd_indices = funcs::GetDevicePointerArray<int64_t, Context>(dev_ctx, indices); hipLaunchKernelGGL(( set_zero_cuda_kernel<T, Rank>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), numel, pd_indices, stride_a, shape_a, x_grad_data); } } auto out_grad_dims = out_grad.dims(); const int64_t numel = indices[0]->numel(); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel); auto out_grad_stride = phi::stride(out_grad_dims); phi::Array<int64_t, Rank> stride_a; phi::Array<int64_t, Rank> shape_a; for (size_t idx = 0; idx < Rank; ++idx) { stride_a[idx] = 
out_grad_stride[idx]; shape_a[idx] = out_grad_dims[idx]; } auto pd_indices = funcs::GetDevicePointerArray<int64_t, Context>(dev_ctx, indices); if (value_grad) { if (value_grad->numel() == 1) { DenseTensor tmp_value_grad(value_grad->dtype()); tmp_value_grad.Resize(indices[0]->dims()); T* tmp_value_grad_data = dev_ctx.template Alloc<T>(&tmp_value_grad); auto out_grad_data = out_grad.data<T>(); hipLaunchKernelGGL(( index_put_grad_cuda_kernel<T, Rank>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), numel, out_grad_data, pd_indices, stride_a, shape_a, tmp_value_grad_data); std::vector<int> v_dims(tmp_value_grad.dims().size()); std::iota(v_dims.begin(), v_dims.end(), 0); IntArray v_axis(v_dims); SumKernel<T, Context>(dev_ctx, tmp_value_grad, v_axis, value_grad->dtype(), false, value_grad); } else if (value_grad->numel() == indices[0]->numel()) { T* value_grad_data = dev_ctx.template Alloc<T>(value_grad); auto out_grad_data = out_grad.data<T>(); hipLaunchKernelGGL(( index_put_grad_cuda_kernel<T, Rank>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), numel, out_grad_data, pd_indices, stride_a, shape_a, value_grad_data); } else { DenseTensor tmp_value_grad(value_grad->dtype()); tmp_value_grad.Resize(indices[0]->dims()); T* tmp_value_grad_data = dev_ctx.template Alloc<T>(&tmp_value_grad); auto out_grad_data = out_grad.data<T>(); hipLaunchKernelGGL(( index_put_grad_cuda_kernel<T, Rank>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), numel, out_grad_data, pd_indices, stride_a, shape_a, tmp_value_grad_data); std::vector<int64_t> after_dims = phi::vectorize(tmp_value_grad.dims()); std::vector<int64_t> before_dims = phi::vectorize(value_grad->dims()); std::vector<int64_t> compress_dims; std::vector<int64_t> dims_without_1; funcs::CalCompressedDimsWith1AndWithout1( &after_dims, &before_dims, &compress_dims, &dims_without_1); auto pre_dims = value_grad->dims(); value_grad->Resize(phi::make_ddim(dims_without_1)); IntArray v_axis(compress_dims); SumKernel<T, Context>(dev_ctx, tmp_value_grad, v_axis, value_grad->dtype(), false, value_grad); value_grad->Resize(pre_dims); } } } template <typename T, typename Context> void IndexPutGradKernel(const Context& dev_ctx, const DenseTensor& x, const std::vector<const DenseTensor*>& indices, const DenseTensor& value, const DenseTensor& out_grad, bool accumulate, DenseTensor* x_grad, DenseTensor* value_grad) { PADDLE_ENFORCE_EQ( x.dtype(), value.dtype(), phi::errors::InvalidArgument( "The data type of tensor value must be same to the data type " "of tensor x.")); std::vector<DenseTensor> tmp_args; std::vector<const phi::DenseTensor*> int_indices_v = funcs::DealWithBoolIndices<T, Context>(dev_ctx, indices, &tmp_args); if (int_indices_v.empty()) { if (x_grad) { phi::Copy(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad); } if (value_grad) { FullKernel<T, Context>(dev_ctx, phi::vectorize(value_grad->dims()), 0.0f, value_grad->dtype(), value_grad); } return; } const size_t total_dims = x.dims().size(); auto bd_dim = funcs::BroadCastTensorsDims(int_indices_v); std::vector<int64_t> res_dim_v(phi::vectorize(bd_dim)); std::vector<const phi::DenseTensor*> res_indices_v(x.dims().size(), nullptr); std::vector<DenseTensor> tmp_res_indices_v; std::vector<DenseTensor> range_tensor_v; for (int i = int_indices_v.size(); i < x.dims().size(); ++i) { range_tensor_v.emplace_back(funcs::GetRangeCudaTensor<int64_t, Context>( dev_ctx, x.dims()[i], phi::DataType::INT64)); } 
funcs::DealWithIndices<T, Context>(dev_ctx, x, int_indices_v, &res_indices_v, &tmp_res_indices_v, range_tensor_v, bd_dim, &res_dim_v); switch (total_dims) { case 1: LaunchIndexPutGradCudaKernel<T, Context, 1>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 2: LaunchIndexPutGradCudaKernel<T, Context, 2>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 3: LaunchIndexPutGradCudaKernel<T, Context, 3>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 4: LaunchIndexPutGradCudaKernel<T, Context, 4>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 5: LaunchIndexPutGradCudaKernel<T, Context, 5>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 6: LaunchIndexPutGradCudaKernel<T, Context, 6>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; default: PADDLE_THROW(phi::errors::InvalidArgument( "dims of input tensor should be less than 7, But received" "%d", x.dims().size())); } } } // namespace phi PD_REGISTER_KERNEL(index_put_grad, GPU, ALL_LAYOUT, phi::IndexPutGradKernel, float, double, int, int64_t, bool, phi::dtype::float16) {}
b18ab4be9896b5b17201000d7af1561ce3a7f17f.cu
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/index_put_grad_kernel.h" #include <numeric> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/cast_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/index_put_utils.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename T, size_t Rank> __global__ void set_zero_cuda_kernel(const int64_t N, int64_t** indices, phi::Array<int64_t, Rank> stride, phi::Array<int64_t, Rank> shape, T* out) { int64_t idx = threadIdx.x + blockDim.x * blockIdx.x; int64_t cur_ix = 0; if (idx >= N) { return; } int64_t offset = 0; for (int i = 0; i < Rank; ++i) { cur_ix = (static_cast<int64_t>(*(indices[i] + idx))); if (cur_ix < 0) { cur_ix += shape[i]; } offset += stride[i] * cur_ix; } *(out + offset) = 0; } template <typename T, size_t Rank> __global__ void index_put_grad_cuda_kernel(const int64_t N, const T* out_grad, int64_t** indices, phi::Array<int64_t, Rank> stride, phi::Array<int64_t, Rank> shape, T* value_grad) { int64_t idx = threadIdx.x + blockDim.x * blockIdx.x; int64_t cur_ix = 0; if (idx >= N) { return; } int64_t offset = 0; for (int i = 0; i < Rank; ++i) { cur_ix = (static_cast<int64_t>(*(indices[i] + idx))); if (cur_ix < 0) { cur_ix += shape[i]; } offset += stride[i] * cur_ix; } *(value_grad + idx) = *(out_grad + offset); } template <typename T, typename Context, size_t Rank> void LaunchIndexPutGradCudaKernel( const Context& dev_ctx, const std::vector<const DenseTensor*>& indices, const DenseTensor& out_grad, bool accumulate, DenseTensor* value_grad, DenseTensor* x_grad) { if (x_grad) { phi::Copy(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad); if (!accumulate) { T* x_grad_data = x_grad->data<T>(); auto x_grad_dims = x_grad->dims(); const int64_t numel = indices[0]->numel(); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel); auto x_grad_stride = phi::stride(x_grad_dims); phi::Array<int64_t, Rank> stride_a; phi::Array<int64_t, Rank> shape_a; for (size_t idx = 0; idx < Rank; ++idx) { stride_a[idx] = x_grad_stride[idx]; shape_a[idx] = x_grad_dims[idx]; } auto pd_indices = funcs::GetDevicePointerArray<int64_t, Context>(dev_ctx, indices); set_zero_cuda_kernel<T, Rank><<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>( numel, pd_indices, stride_a, shape_a, x_grad_data); } } auto out_grad_dims = out_grad.dims(); const int64_t numel = indices[0]->numel(); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel); auto out_grad_stride = phi::stride(out_grad_dims); phi::Array<int64_t, Rank> stride_a; phi::Array<int64_t, Rank> shape_a; for (size_t idx = 0; idx < Rank; ++idx) { stride_a[idx] = out_grad_stride[idx]; shape_a[idx] = out_grad_dims[idx]; } auto pd_indices = funcs::GetDevicePointerArray<int64_t, Context>(dev_ctx, 
indices); if (value_grad) { if (value_grad->numel() == 1) { DenseTensor tmp_value_grad(value_grad->dtype()); tmp_value_grad.Resize(indices[0]->dims()); T* tmp_value_grad_data = dev_ctx.template Alloc<T>(&tmp_value_grad); auto out_grad_data = out_grad.data<T>(); index_put_grad_cuda_kernel<T, Rank> <<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(numel, out_grad_data, pd_indices, stride_a, shape_a, tmp_value_grad_data); std::vector<int> v_dims(tmp_value_grad.dims().size()); std::iota(v_dims.begin(), v_dims.end(), 0); IntArray v_axis(v_dims); SumKernel<T, Context>(dev_ctx, tmp_value_grad, v_axis, value_grad->dtype(), false, value_grad); } else if (value_grad->numel() == indices[0]->numel()) { T* value_grad_data = dev_ctx.template Alloc<T>(value_grad); auto out_grad_data = out_grad.data<T>(); index_put_grad_cuda_kernel<T, Rank><<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>( numel, out_grad_data, pd_indices, stride_a, shape_a, value_grad_data); } else { DenseTensor tmp_value_grad(value_grad->dtype()); tmp_value_grad.Resize(indices[0]->dims()); T* tmp_value_grad_data = dev_ctx.template Alloc<T>(&tmp_value_grad); auto out_grad_data = out_grad.data<T>(); index_put_grad_cuda_kernel<T, Rank> <<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(numel, out_grad_data, pd_indices, stride_a, shape_a, tmp_value_grad_data); std::vector<int64_t> after_dims = phi::vectorize(tmp_value_grad.dims()); std::vector<int64_t> before_dims = phi::vectorize(value_grad->dims()); std::vector<int64_t> compress_dims; std::vector<int64_t> dims_without_1; funcs::CalCompressedDimsWith1AndWithout1( &after_dims, &before_dims, &compress_dims, &dims_without_1); auto pre_dims = value_grad->dims(); value_grad->Resize(phi::make_ddim(dims_without_1)); IntArray v_axis(compress_dims); SumKernel<T, Context>(dev_ctx, tmp_value_grad, v_axis, value_grad->dtype(), false, value_grad); value_grad->Resize(pre_dims); } } } template <typename T, typename Context> void IndexPutGradKernel(const Context& dev_ctx, const DenseTensor& x, const std::vector<const DenseTensor*>& indices, const DenseTensor& value, const DenseTensor& out_grad, bool accumulate, DenseTensor* x_grad, DenseTensor* value_grad) { PADDLE_ENFORCE_EQ( x.dtype(), value.dtype(), phi::errors::InvalidArgument( "The data type of tensor value must be same to the data type " "of tensor x.")); std::vector<DenseTensor> tmp_args; std::vector<const phi::DenseTensor*> int_indices_v = funcs::DealWithBoolIndices<T, Context>(dev_ctx, indices, &tmp_args); if (int_indices_v.empty()) { if (x_grad) { phi::Copy(dev_ctx, out_grad, dev_ctx.GetPlace(), false, x_grad); } if (value_grad) { FullKernel<T, Context>(dev_ctx, phi::vectorize(value_grad->dims()), 0.0f, value_grad->dtype(), value_grad); } return; } const size_t total_dims = x.dims().size(); auto bd_dim = funcs::BroadCastTensorsDims(int_indices_v); std::vector<int64_t> res_dim_v(phi::vectorize(bd_dim)); std::vector<const phi::DenseTensor*> res_indices_v(x.dims().size(), nullptr); std::vector<DenseTensor> tmp_res_indices_v; std::vector<DenseTensor> range_tensor_v; for (int i = int_indices_v.size(); i < x.dims().size(); ++i) { range_tensor_v.emplace_back(funcs::GetRangeCudaTensor<int64_t, Context>( dev_ctx, x.dims()[i], phi::DataType::INT64)); } funcs::DealWithIndices<T, Context>(dev_ctx, x, int_indices_v, &res_indices_v, &tmp_res_indices_v, range_tensor_v, bd_dim, &res_dim_v); switch (total_dims) { case 1: LaunchIndexPutGradCudaKernel<T, Context, 1>( dev_ctx, res_indices_v, 
out_grad, accumulate, value_grad, x_grad); break; case 2: LaunchIndexPutGradCudaKernel<T, Context, 2>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 3: LaunchIndexPutGradCudaKernel<T, Context, 3>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 4: LaunchIndexPutGradCudaKernel<T, Context, 4>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 5: LaunchIndexPutGradCudaKernel<T, Context, 5>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; case 6: LaunchIndexPutGradCudaKernel<T, Context, 6>( dev_ctx, res_indices_v, out_grad, accumulate, value_grad, x_grad); break; default: PADDLE_THROW(phi::errors::InvalidArgument( "dims of input tensor should be less than 7, But received" "%d", x.dims().size())); } } } // namespace phi PD_REGISTER_KERNEL(index_put_grad, GPU, ALL_LAYOUT, phi::IndexPutGradKernel, float, double, int, int64_t, bool, phi::dtype::float16) {}
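The pair above shows the main mechanical rewrite hipify performs on this file: every CUDA triple-chevron launch becomes a hipLaunchKernelGGL call, with the grid, block, shared-memory size, and stream passed as explicit arguments before the kernel arguments. The toy sketch below shows the same launch in both spellings; the kernel and launcher names are illustrative, not taken from the file.

#include <cstdint>
#include <cuda_runtime.h>

// Toy kernel: scale n floats in place.
__global__ void scale(const int64_t n, const float alpha, float* __restrict__ x) {
    int64_t i = threadIdx.x + (int64_t)blockDim.x * blockIdx.x;
    if (i < n) x[i] *= alpha;
}

void launch_scale(int64_t n, float alpha, float* d_x, cudaStream_t stream) {
    const int threads = 256;
    const int blocks  = (int)((n + threads - 1) / threads);
    // CUDA spelling (as in the .cu file):
    scale<<<blocks, threads, 0, stream>>>(n, alpha, d_x);
    // hipify output (as in the .hip file):
    //   hipLaunchKernelGGL((scale), dim3(blocks), dim3(threads), 0, stream,
    //                      n, alpha, d_x);
}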
6f998452a954a9400e1b1ffc7af95d44b8afc5e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This work is part of the Core Imaging Library developed by * Visual Analytics and Imaging System Group of the Science Technology * Facilities Council, STFC and Diamond Light Source Ltd. * * Copyright 2017 Daniil Kazantsev * Copyright 2017 Srikanth Nagella, Edoardo Pasca * Copyright 2018 Diamond Light Source Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "PatchSelect_GPU_core.h" #include "shared.h" /* CUDA implementation of non-local weight pre-calculation for non-local priors * Weights and associated indices are stored into pre-allocated arrays and passed * to the regulariser * * * Input Parameters: * 1. 2D grayscale image (classical 3D version will not be supported but rather 2D + dim extension (TODO)) * 2. Searching window (half-size of the main bigger searching window, e.g. 11) * 3. Similarity window (half-size of the patch window, e.g. 2) * 4. The number of neighbours to take (the most prominent after sorting neighbours will be taken) * 5. noise-related parameter to calculate non-local weights * 6. GPU device number if for multigpu run (default 0) * * Output [2D]: * 1. AR_i - indeces of i neighbours * 2. AR_j - indeces of j neighbours * 3. Weights_ij - associated weights */ #define BLKXSIZE 8 #define BLKYSIZE 4 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) #define M_PI 3.14159265358979323846 #define EPS 1.0e-8 #define CONSTVECSIZE5 121 #define CONSTVECSIZE7 225 #define CONSTVECSIZE9 361 #define CONSTVECSIZE11 529 #define CONSTVECSIZE13 729 __device__ void swap(float *xp, float *yp) { float temp = *xp; *xp = *yp; *yp = temp; } __device__ void swapUS(unsigned short *xp, unsigned short *yp) { unsigned short temp = *xp; *xp = *yp; *yp = temp; } /********************************************************************************/ __global__ void IndexSelect2D_5_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE5]; unsigned short ind_i[CONSTVECSIZE5]; unsigned short ind_j[CONSTVECSIZE5]; for(ind=0; ind<CONSTVECSIZE5; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } /********************************************************************************/ __global__ void IndexSelect2D_7_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE7]; unsigned short ind_i[CONSTVECSIZE7]; unsigned short ind_j[CONSTVECSIZE7]; for(ind=0; ind<CONSTVECSIZE7; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 
0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } __global__ void IndexSelect2D_9_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE9]; unsigned short ind_i[CONSTVECSIZE9]; unsigned short ind_j[CONSTVECSIZE9]; for(ind=0; ind<CONSTVECSIZE9; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } __global__ void IndexSelect2D_11_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE11]; unsigned short ind_i[CONSTVECSIZE11]; unsigned short ind_j[CONSTVECSIZE11]; for(ind=0; ind<CONSTVECSIZE11; ind++) { 
Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } __global__ void IndexSelect2D_13_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE13]; unsigned short ind_i[CONSTVECSIZE13]; unsigned short ind_j[CONSTVECSIZE13]; for(ind=0; ind<CONSTVECSIZE13; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } 
} /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ /********************* MAIN HOST FUNCTION ******************/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ extern "C" int PatchSelect_GPU_main(float *A, unsigned short *H_i, unsigned short *H_j, float *Weights, int N, int M, int SearchWindow, int SimilarWin, int NumNeighb, float h, int gpu_device) { int deviceCount = -1; // number of devices hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(hipSetDevice(gpu_device)); int SearchW_full, SimilW_full, counterG, i, j; float *Ad, *Weights_d, h2, *Eucl_Vec, *Eucl_Vec_d; unsigned short *H_i_d, *H_j_d; h2 = h*h; dim3 dimBlock(BLKXSIZE,BLKYSIZE); dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE)); SearchW_full = (2*SearchWindow + 1)*(2*SearchWindow + 1); /* the full searching window size */ SimilW_full = (2*SimilarWin + 1)*(2*SimilarWin + 1); /* the full similarity window size */ /* generate a 2D Gaussian kernel for NLM procedure */ Eucl_Vec = (float*) calloc (SimilW_full,sizeof(float)); counterG = 0; for(i=-SimilarWin; i<=SimilarWin; i++) { for(j=-SimilarWin; j<=SimilarWin; j++) { Eucl_Vec[counterG] = (float)exp(-(pow(((float) i), 2) + pow(((float) j), 2))/(2.0*SimilarWin*SimilarWin)); counterG++; }} /*main neighb loop */ /*allocate space on the device*/ checkCudaErrors( hipMalloc((void**)&Ad, N*M*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&H_i_d, N*M*NumNeighb*sizeof(unsigned short)) ); checkCudaErrors( hipMalloc((void**)&H_j_d, N*M*NumNeighb*sizeof(unsigned short)) ); checkCudaErrors( hipMalloc((void**)&Weights_d, N*M*NumNeighb*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&Eucl_Vec_d, SimilW_full*sizeof(float)) ); /* copy data from the host to the device */ checkCudaErrors( hipMemcpy(Ad,A,N*M*sizeof(float),hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Eucl_Vec_d,Eucl_Vec,SimilW_full*sizeof(float),hipMemcpyHostToDevice) ); /********************** Run CUDA kernel here ********************/ if (SearchWindow == 5) hipLaunchKernelGGL(( IndexSelect2D_5_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 7) hipLaunchKernelGGL(( IndexSelect2D_7_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 9) hipLaunchKernelGGL(( IndexSelect2D_9_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 11) hipLaunchKernelGGL(( IndexSelect2D_11_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 13) hipLaunchKernelGGL(( IndexSelect2D_13_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else { fprintf(stderr, "Select the searching window size from 5, 7, 9, 11 or 13\n"); return -1;} checkCudaErrors(hipPeekAtLastError() ); checkCudaErrors(hipDeviceSynchronize()); /***************************************************************/ checkCudaErrors(hipMemcpy(H_i, H_i_d, N*M*NumNeighb*sizeof(unsigned short),hipMemcpyDeviceToHost) ); checkCudaErrors(hipMemcpy(H_j, H_j_d, N*M*NumNeighb*sizeof(unsigned 
short),hipMemcpyDeviceToHost) );
    checkCudaErrors(hipMemcpy(Weights, Weights_d, N*M*NumNeighb*sizeof(float),hipMemcpyDeviceToHost) );

    /* release device buffers and the host Gaussian-kernel vector */
    hipFree(Ad);
    hipFree(H_i_d);
    hipFree(H_j_d);
    hipFree(Weights_d);
    hipFree(Eucl_Vec_d);
    free(Eucl_Vec);
    hipDeviceSynchronize();
    return 0;
}
6f998452a954a9400e1b1ffc7af95d44b8afc5e3.cu
/* * This work is part of the Core Imaging Library developed by * Visual Analytics and Imaging System Group of the Science Technology * Facilities Council, STFC and Diamond Light Source Ltd. * * Copyright 2017 Daniil Kazantsev * Copyright 2017 Srikanth Nagella, Edoardo Pasca * Copyright 2018 Diamond Light Source Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "PatchSelect_GPU_core.h" #include "shared.h" /* CUDA implementation of non-local weight pre-calculation for non-local priors * Weights and associated indices are stored into pre-allocated arrays and passed * to the regulariser * * * Input Parameters: * 1. 2D grayscale image (classical 3D version will not be supported but rather 2D + dim extension (TODO)) * 2. Searching window (half-size of the main bigger searching window, e.g. 11) * 3. Similarity window (half-size of the patch window, e.g. 2) * 4. The number of neighbours to take (the most prominent after sorting neighbours will be taken) * 5. noise-related parameter to calculate non-local weights * 6. GPU device number if for multigpu run (default 0) * * Output [2D]: * 1. AR_i - indeces of i neighbours * 2. AR_j - indeces of j neighbours * 3. Weights_ij - associated weights */ #define BLKXSIZE 8 #define BLKYSIZE 4 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) #define M_PI 3.14159265358979323846 #define EPS 1.0e-8 #define CONSTVECSIZE5 121 #define CONSTVECSIZE7 225 #define CONSTVECSIZE9 361 #define CONSTVECSIZE11 529 #define CONSTVECSIZE13 729 __device__ void swap(float *xp, float *yp) { float temp = *xp; *xp = *yp; *yp = temp; } __device__ void swapUS(unsigned short *xp, unsigned short *yp) { unsigned short temp = *xp; *xp = *yp; *yp = temp; } /********************************************************************************/ __global__ void IndexSelect2D_5_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE5]; unsigned short ind_i[CONSTVECSIZE5]; unsigned short ind_j[CONSTVECSIZE5]; for(ind=0; ind<CONSTVECSIZE5; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } /********************************************************************************/ __global__ void IndexSelect2D_7_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE7]; unsigned short ind_i[CONSTVECSIZE7]; unsigned short ind_j[CONSTVECSIZE7]; for(ind=0; ind<CONSTVECSIZE7; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 
0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } __global__ void IndexSelect2D_9_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE9]; unsigned short ind_i[CONSTVECSIZE9]; unsigned short ind_j[CONSTVECSIZE9]; for(ind=0; ind<CONSTVECSIZE9; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } __global__ void IndexSelect2D_11_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE11]; unsigned short ind_i[CONSTVECSIZE11]; unsigned short ind_j[CONSTVECSIZE11]; for(ind=0; ind<CONSTVECSIZE11; ind++) { 
Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } } __global__ void IndexSelect2D_13_kernel(float *Ad, unsigned short *H_i_d, unsigned short *H_j_d, float *Weights_d, float *Eucl_Vec_d, int N, int M, int SearchWindow, int SearchW_full, int SimilarWin, int NumNeighb, float h2) { long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, counterG, index2, ind; float normsum; float Weight_Vec[CONSTVECSIZE13]; unsigned short ind_i[CONSTVECSIZE13]; unsigned short ind_j[CONSTVECSIZE13]; for(ind=0; ind<CONSTVECSIZE13; ind++) { Weight_Vec[ind] = 0.0; ind_i[ind] = 0; ind_j[ind] = 0; } int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; long index = i + N*j; counter = 0; for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) { i1 = i+i_m; if ((i1 >= 0) && (i1 < N)) { for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) { j1 = j+j_m; if ((j1 >= 0) && (j1 < M)) { normsum = 0.0f; counterG = 0; for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) { i2 = i1 + i_c; i3 = i + i_c; if ((i2 >= 0) && (i2 < N) && (i3 >= 0) && (i3 < N)) { for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) { j2 = j1 + j_c; j3 = j + j_c; if ((j2 >= 0) && (j2 < M) && (j3 >= 0) && (j3 < M)) { normsum += Eucl_Vec_d[counterG]*powf(Ad[i3 + N*j3] - Ad[i2 + N*j2], 2); counterG++; } /*if j2 j3*/ } } /*if i2 i3*/ } /* writing temporarily into vectors */ if (normsum > EPS) { Weight_Vec[counter] = expf(-normsum/h2); ind_i[counter] = i1; ind_j[counter] = j1; counter++; } } /*if j1*/ } } /*if i1*/ } /* do sorting to choose the most prominent weights [HIGH to LOW] */ /* and re-arrange indeces accordingly */ for (x = 0; x < counter-1; x++) { for (y = 0; y < counter-x-1; y++) { if (Weight_Vec[y] < Weight_Vec[y+1]) { swap(&Weight_Vec[y], &Weight_Vec[y+1]); swapUS(&ind_i[y], &ind_i[y+1]); swapUS(&ind_j[y], &ind_j[y+1]); } } } /*sorting loop finished*/ /*now select the NumNeighb more prominent weights and store into arrays */ for(x=0; x < NumNeighb; x++) { index2 = (N*M*x) + index; H_i_d[index2] = ind_i[x]; H_j_d[index2] = ind_j[x]; Weights_d[index2] = Weight_Vec[x]; } 
} /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ /********************* MAIN HOST FUNCTION ******************/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ extern "C" int PatchSelect_GPU_main(float *A, unsigned short *H_i, unsigned short *H_j, float *Weights, int N, int M, int SearchWindow, int SimilarWin, int NumNeighb, float h, int gpu_device) { int deviceCount = -1; // number of devices cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(cudaSetDevice(gpu_device)); int SearchW_full, SimilW_full, counterG, i, j; float *Ad, *Weights_d, h2, *Eucl_Vec, *Eucl_Vec_d; unsigned short *H_i_d, *H_j_d; h2 = h*h; dim3 dimBlock(BLKXSIZE,BLKYSIZE); dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE)); SearchW_full = (2*SearchWindow + 1)*(2*SearchWindow + 1); /* the full searching window size */ SimilW_full = (2*SimilarWin + 1)*(2*SimilarWin + 1); /* the full similarity window size */ /* generate a 2D Gaussian kernel for NLM procedure */ Eucl_Vec = (float*) calloc (SimilW_full,sizeof(float)); counterG = 0; for(i=-SimilarWin; i<=SimilarWin; i++) { for(j=-SimilarWin; j<=SimilarWin; j++) { Eucl_Vec[counterG] = (float)exp(-(pow(((float) i), 2) + pow(((float) j), 2))/(2.0*SimilarWin*SimilarWin)); counterG++; }} /*main neighb loop */ /*allocate space on the device*/ checkCudaErrors( cudaMalloc((void**)&Ad, N*M*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&H_i_d, N*M*NumNeighb*sizeof(unsigned short)) ); checkCudaErrors( cudaMalloc((void**)&H_j_d, N*M*NumNeighb*sizeof(unsigned short)) ); checkCudaErrors( cudaMalloc((void**)&Weights_d, N*M*NumNeighb*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&Eucl_Vec_d, SimilW_full*sizeof(float)) ); /* copy data from the host to the device */ checkCudaErrors( cudaMemcpy(Ad,A,N*M*sizeof(float),cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Eucl_Vec_d,Eucl_Vec,SimilW_full*sizeof(float),cudaMemcpyHostToDevice) ); /********************** Run CUDA kernel here ********************/ if (SearchWindow == 5) IndexSelect2D_5_kernel<<<dimGrid,dimBlock>>>(Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 7) IndexSelect2D_7_kernel<<<dimGrid,dimBlock>>>(Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 9) IndexSelect2D_9_kernel<<<dimGrid,dimBlock>>>(Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 11) IndexSelect2D_11_kernel<<<dimGrid,dimBlock>>>(Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else if (SearchWindow == 13) IndexSelect2D_13_kernel<<<dimGrid,dimBlock>>>(Ad, H_i_d, H_j_d, Weights_d, Eucl_Vec_d, N, M, SearchWindow, SearchW_full, SimilarWin, NumNeighb, h2); else { fprintf(stderr, "Select the searching window size from 5, 7, 9, 11 or 13\n"); return -1;} checkCudaErrors(cudaPeekAtLastError() ); checkCudaErrors(cudaDeviceSynchronize()); /***************************************************************/ checkCudaErrors(cudaMemcpy(H_i, H_i_d, N*M*NumNeighb*sizeof(unsigned short),cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaMemcpy(H_j, H_j_d, N*M*NumNeighb*sizeof(unsigned short),cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaMemcpy(Weights, Weights_d, N*M*NumNeighb*sizeof(float),cudaMemcpyDeviceToHost) ); cudaFree(Ad); cudaFree(H_i_d); 
cudaFree(H_j_d);
    cudaFree(Weights_d);
    cudaFree(Eucl_Vec_d);
    free(Eucl_Vec);  /* host Gaussian-kernel vector allocated with calloc above */
    cudaDeviceSynchronize();
    return 0;
}
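/* A minimal host-side usage sketch for PatchSelect_GPU_main, assuming the prototype is
   exported through "PatchSelect_GPU_core.h" as included above. The image size, window
   half-sizes, neighbour count and h value below are illustrative placeholders, not values
   taken from the original code; only the argument order follows the signature above. */
#include <stdio.h>
#include <stdlib.h>
#include "PatchSelect_GPU_core.h"

int main(void)
{
    const int N = 256, M = 256;     /* image dimensions (assumed) */
    const int SearchWindow = 5;     /* half-size of the searching window: 5, 7, 9, 11 or 13 */
    const int SimilarWin   = 2;     /* half-size of the similarity (patch) window */
    const int NumNeighb    = 20;    /* number of most prominent neighbours to keep */
    const float h          = 0.1f;  /* noise-related weighting parameter (assumed value) */

    /* host buffers: input image plus NumNeighb output planes of indices and weights */
    float *A = (float*)calloc(N*M, sizeof(float));
    unsigned short *H_i = (unsigned short*)calloc(N*M*NumNeighb, sizeof(unsigned short));
    unsigned short *H_j = (unsigned short*)calloc(N*M*NumNeighb, sizeof(unsigned short));
    float *Weights = (float*)calloc(N*M*NumNeighb, sizeof(float));

    int status = PatchSelect_GPU_main(A, H_i, H_j, Weights,
                                      N, M, SearchWindow, SimilarWin, NumNeighb, h, 0);
    if (status != 0) fprintf(stderr, "PatchSelect_GPU_main failed\n");

    free(A); free(H_i); free(H_j); free(Weights);
    return status;
}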
dbdc8646c53f25c1334e12524612de2d199dbdf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include <assert.h> extern "C" { #include "blas.h" #include "hip/hip_runtime.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates); check_error(hipPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size); check_error(hipPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n); }else{ hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size); } check_error(hipPeekAtLastError()); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(hipPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t); check_error(hipPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float 
*delta) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(hipPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(hipPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) 
Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; //printf("%f\n", sum); for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; hipLaunchKernelGGL(( l2norm_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, dx, batch, filters, spatial); check_error(hipPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean); check_error(hipPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY); check_error(hipPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY); check_error(hipPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, 
float *out) { int size = spatial*batch*layers; hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out); check_error(hipPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int size = w*h*c*batch; hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out); check_error(hipPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, val); check_error(hipPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale); check_error(hipPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX); check_error(hipPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; //out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(hipPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( softmax_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( logistic_x_ent_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 
1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { hipLaunchKernelGGL(( wgan_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error); check_error(hipPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT); check_error(hipPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c); check_error(hipPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc); check_error(hipPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c); check_error(hipPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(hipPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(hipPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; hipLaunchKernelGGL(( upsample_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, in, w, h, c, batch, stride, forward, scale, out); check_error(hipPeekAtLastError()); }
dbdc8646c53f25c1334e12524612de2d199dbdf9.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include <assert.h> extern "C" { #include "blas.h" #include "cuda.h" #include "utils.h" } __global__ void scale_bias_kernel(float *output, float *biases, int n, int size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int filter = blockIdx.y; int batch = blockIdx.z; if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter]; } void scale_bias_gpu(float *output, float *biases, int batch, int n, int size) { dim3 dimGrid((size-1)/BLOCK + 1, n, batch); dim3 dimBlock(BLOCK, 1, 1); scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? delta[index]*x_norm[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i]; } } void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) { backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates); check_error(cudaPeekAtLastError()); } __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n*size*batch) return; int i = index % size; index /= size; int j = index % n; index /= n; int k = index; output[(k*n+j)*size + i] += biases[j]; } void add_bias_gpu(float *output, float *biases, int batch, int n, int size) { int num = n*size*batch; add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size); check_error(cudaPeekAtLastError()); } __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= n) return; int b; float sum = 0; for(b = 0; b < batch; ++b){ int i = b*n + index; sum += delta[i]; } bias_updates[index] += sum; } __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size) { __shared__ float part[BLOCK]; int i,b; int filter = blockIdx.x; int p = threadIdx.x; float sum = 0; for(b = 0; b < batch; ++b){ for(i = 0; i < size; i += BLOCK){ int index = p + i + size*(filter + n*b); sum += (p+i < size) ? 
delta[index] : 0; } } part[p] = sum; __syncthreads(); if (p == 0) { for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i]; } } void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size) { if(size == 1){ backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n); }else{ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size); } check_error(cudaPeekAtLastError()); } /* __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int f1 = index / n; int f2 = index % n; if (f2 <= f1) return; float sum = 0; float norm1 = 0; float norm2 = 0; int b, i; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; sum += output[i1] * output[i2]; norm1 += output[i1] * output[i1]; norm2 += output[i2] * output[i2]; } } norm1 = sqrt(norm1); norm2 = sqrt(norm2); float norm = norm1 * norm2; sum = sum / norm; for(b = 0; b < batch; ++b){ for(i = 0; i < size; ++i){ int i1 = b * size * n + f1 * size + i; int i2 = b * size * n + f2 * size + i; delta[i1] += - scale * sum * output[i2] / norm; delta[i2] += - scale * sum * output[i1] / norm; } } } void dot_error_gpu(layer l) { dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu); check_error(cudaPeekAtLastError()); } */ __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; float mhat = m[index] / (1.f - powf(B1, t)); float vhat = v[index] / (1.f - powf(B2, t)); x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps); } extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t) { adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t); check_error(cudaPeekAtLastError()); } extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t) { scal_gpu(n, B1, m, 1); scal_gpu(n, B2, v, 1); axpy_gpu(n, -decay*batch, w, 1, d, 1); axpy_gpu(n, (1-B1), d, 1, m, 1); mul_gpu(n, d, 1, d, 1); axpy_gpu(n, (1-B2), d, 1, v, 1); adam_gpu(n, w, m, v, B1, B2, rate, eps, t); fill_gpu(n, 0, d, 1); } __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f)); } __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int f = (index/spatial)%filters; delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch); } extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta) { size_t N = batch*filters*spatial; normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, 
variance, mean_delta, variance_delta, batch, filters, spatial, delta); check_error(cudaPeekAtLastError()); } __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; variance_delta[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance_delta[i] += delta[index]*(x[index] - mean[i]); } } variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f)); } __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } } __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? delta[index] : 0; } } __syncthreads(); if(id == 0){ mean_delta[filter] = 0; for(i = 0; i < threads; ++i){ mean_delta[filter] += local[i]; } mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f)); } } __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
delta[index]*(x[index] - mean[filter]) : 0; } } __syncthreads(); if(id == 0){ variance_delta[filter] = 0; for(i = 0; i < threads; ++i){ variance_delta[filter] += local[i]; } variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f)); } } __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean_delta[i] = 0; for (j = 0; j < batch; ++j) { for (k = 0; k < spatial; ++k) { int index = j*filters*spatial + i*spatial + k; mean_delta[i] += delta[index]; } } mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f)); } extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta) { fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) { fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta); check_error(cudaPeekAtLastError()); } __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1.f/(batch * spatial); int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; int j,k; mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1.f/(batch * spatial - 1); int j,k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= filters) return; variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += powf((x[index] - mean[i]), 2); } } variance[i] *= scale; } __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_index = i; int in_w = i%w; i = i/w; int in_h = i%h; i = i/h; int in_c = i%c; i = i/c; int b = i%batch; int out_c = c/(stride*stride); int c2 = in_c % out_c; int offset = in_c / out_c; int w2 = in_w*stride + offset % stride; int h2 = in_h*stride + offset / stride; //printf("%d\n", offset); int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b)); // printf("%d %d %d\n", w2, h2, c2); //printf("%d %d\n", in_index, out_index); //if(out_index >= N || out_index < 0) printf("bad bad bad \n"); if(forward) out[out_index] = x[in_index]; else out[in_index] = x[out_index]; //if(forward) out[1] = x[1]; //else out[0] = x[0]; } __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX]; } __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, 
int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA); } __global__ void const_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX])); } __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) { if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0; } } __global__ void add_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] += ALPHA; } __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] *= ALPHA; } __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) X[i*INCX] = ALPHA; } __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX]; } __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < N) Y[i*INCY] *= X[i*INCX]; } extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { size_t N = batch*filters*spatial; normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (index >= N) return; int b = index / spatial; int i = index % spatial; int f; float sum = 0; for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; sum += powf(x[index], 2); } sum = sqrtf(sum); if(sum == 0) sum = 1; //printf("%f\n", sum); for(f = 0; f < filters; ++f){ int index = b*filters*spatial + f*spatial + i; x[index] /= sum; dx[index] = (1 - x[index]) / sum; } } extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial) { size_t N = batch*spatial; l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial); check_error(cudaPeekAtLastError()); } __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? 
x[index] : 0; } } __syncthreads(); if(id == 0){ mean[filter] = 0; for(i = 0; i < threads; ++i){ mean[filter] += local[i]; } mean[filter] /= spatial * batch; } } __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) { const int threads = BLOCK; __shared__ float local[threads]; int id = threadIdx.x; local[id] = 0; int filter = blockIdx.x; int i, j; for(j = 0; j < batch; ++j){ for(i = 0; i < spatial; i += threads){ int index = j*spatial*filters + filter*spatial + i + id; local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0; } } __syncthreads(); if(id == 0){ variance[filter] = 0; for(i = 0; i < threads; ++i){ variance[filter] += local[i]; } variance[filter] /= (spatial * batch - 1); } } extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean) { mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean); check_error(cudaPeekAtLastError()); } extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); } extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) { pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY) { copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY); } extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY) { mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY); check_error(cudaPeekAtLastError()); } extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY) { copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY); check_error(cudaPeekAtLastError()); } __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int in_s = i%spatial; i = i/spatial; int in_c = i%layers; i = i/layers; int b = i; int i1 = b*layers*spatial + in_c*spatial + in_s; int i2 = b*layers*spatial + in_s*layers + in_c; if (forward) out[i2] = x[i1]; else out[i1] = x[i2]; } extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out) { int size = spatial*batch*layers; flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out); check_error(cudaPeekAtLastError()); } extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out) { 
int size = w*h*c*batch; reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out); check_error(cudaPeekAtLastError()); } __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] = val; } extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val) { mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val); check_error(cudaPeekAtLastError()); } __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n && mask[i] == mask_num) x[i] *= scale; } extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale) { scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale); check_error(cudaPeekAtLastError()); } extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX) { const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX) { constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX) { add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX) { scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX) { supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX) { fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); check_error(cudaPeekAtLastError()); } __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= size) return; int i = id % minw; id /= minw; int j = id % minh; id /= minh; int k = id % minc; id /= minc; int b = id % batch; int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] = s1*out[out_index] + s2*add[add_index]; //out[out_index] += add[add_index]; } extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out) { int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? 
c1 : c2; int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int size = batch * minw * minh * minc; shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out); check_error(cudaPeekAtLastError()); } __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; float abs_val = fabsf(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001); delta[i] = t-p; } } extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error) { logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = diff * diff; //I know this is technically wrong, deal with it. delta[i] = diff; } } extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error) { l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ float diff = truth[i] - pred[i]; error[i] = abs(diff); delta[i] = (diff > 0) ? 1 : -1; } } extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error) { l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ error[i] = truth[i] ? -pred[i] : pred[i]; delta[i] = (truth[i] > 0) ? 
1 : -1; } } extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error) { wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error); check_error(cudaPeekAtLastError()); } __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ if(X) X[b*NX + j] += OUT[i]; } else { if(Y) Y[b*NY + j - NX] += OUT[i]; } } } extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < (NX+NY)*B){ int b = i / (NX+NY); int j = i % (NX+NY); if (j < NX){ OUT[i] = X[b*NX + j]; } else { OUT[i] = Y[b*NY + j - NX]; } } } extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT); check_error(cudaPeekAtLastError()); } extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c) { weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c); check_error(cudaPeekAtLastError()); } __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc) { weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc); check_error(cudaPeekAtLastError()); } __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n){ c[i] += a[i]*b[i]; } } extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c) { mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c); check_error(cudaPeekAtLastError()); } __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? 
val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= spatial*batch*groups) return; int s = id % spatial; id = id / spatial; int g = id % groups; int b = id / groups; int goff = group_offset[g]*spatial; int boff = b*stride; softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s); } extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier) { int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); /* static int *tree_groups_size = 0; static int *tree_groups_offset = 0; if(!tree_groups_size){ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups); tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups); } */ int num = spatial*batch*hier.groups; softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset); check_error(cudaPeekAtLastError()); cuda_free((float *)tree_groups_size); cuda_free((float *)tree_groups_offset); } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); } extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output); check_error(cudaPeekAtLastError()); } __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= N) return; int out_index = i; int out_w = i%(w*stride); i = i/(w*stride); int out_h = i%(h*stride); i = i/(h*stride); int out_c = i%c; i = i/c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if(forward) out[out_index] += scale * x[in_index]; else atomicAdd(x+in_index, scale * out[out_index]); } extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t size = w*h*c*batch*stride*stride; upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out); check_error(cudaPeekAtLastError()); }
22c18dde7c51c9615db38184fb1b0bea87e4efc1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void hello_cuda() { printf("Hello Cuda!\n"); } int main() { dim3 block(4); dim3 grid(8); hipLaunchKernelGGL(( hello_cuda), dim3(grid), dim3(block), 0, 0, );; hipDeviceSynchronize(); hipDeviceReset(); return 0; }
22c18dde7c51c9615db38184fb1b0bea87e4efc1.cu
#include <stdio.h> __global__ void hello_cuda() { printf("Hello Cuda!\n"); } int main() { dim3 block(4); dim3 grid(8); hello_cuda<<<grid, block>>>();; cudaDeviceSynchronize(); cudaDeviceReset(); return 0; }
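The pair above shows the one mechanical change hipify makes to this file: the triple-chevron launch becomes a hipLaunchKernelGGL call, whose two extra zero arguments are the dynamic shared-memory size in bytes and the stream (both implicit defaults in the <<<>>> form). A small self-contained HIP sketch of the same mapping, using an illustrative kernel name:
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void hello_again() { printf("Hello again!\n"); }
int main()
{
    dim3 block(4);
    dim3 grid(8);
    // Equivalent to the CUDA launch hello_again<<<grid, block, 0, 0>>>():
    // arguments are kernel, grid dim, block dim, sharedMemBytes, stream, then kernel args.
    hipLaunchKernelGGL(hello_again, grid, block, 0, 0);
    hipDeviceSynchronize();
    return 0;
}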
8a0cbc6e04934267544f6cc7859e31e81a176911.hip
// !!! This is a file automatically generated by hipify!!! /// --------------------------------------------------------------------------- /// CUDA Workshop 2018 /// Universidad de Alicante /// Práctica 0 - Suma de Vectores /// Código preparado por: Albert García <[email protected]> /// Sergio Orts <[email protected]> /// --------------------------------------------------------------------------- #include <iostream> // Cabecera necesaria para las rutinas del runtime de CUDA (hipFree, hipMalloc...) #include <hip/hip_runtime.h> // Cabecera necesaria para variables y tipos de CUDA... #include <device_launch_parameters.h> /// Kernel para suma de vectores. /// Este kernel computará la suma de dos vectores de forma que cada /// hilo será responsable de sumar un elemento de dichos vectores. __global__ void suma_vectores( const float *cpA, const float *cpB, float *pC, const int cNumElements) { /// === PASO 2 ============================================================ /// Define los índices del elemento a ser sumado por cada hilo empleando las /// variables de CUDA: threadIdx, blockIdx y blockDim. /// TODO: int idx_ = ???; /// Suma las dos posiciones en el vector de salida, cada hilo debe computar /// el cálculo de un elemento. /// TODO: /// === FIN PASO 2 ======================================================== } /// Kernel para suma de vectores con stride. /// Este kernel computará la suma de dos vectores de forma que cada /// hilo será responsable de sumar varios elementos de dichos vectores. __global__ void suma_vectores_strided( const float *cpA, const float *cpB, float *pC, const int cNumElements) { /// === PASO 4 ============================================================ /// Modifica el kernel anterior para que se puedan sumar vectores de un /// tamaño muy grande. Recuerda cambiar los parámetros de invocación y /// llamar a este kernel en lugar de al anterior. int idx_ = ???; /// Sumar las posiciones adecuadas en el vector de salida, cada hilo debe /// computar más de un elemento. /// TODO: /// === FINAL PASO 4 ====================================================== } int main(void) { // Elegimos la GPU a utilizar, en este caso la 0 hipSetDevice(0); // Calculamos el tamaño en bytes del vector /// === PASO 3 ============================================================ /// Modifica el número de elementos a sumar. 
const int kNumElements = 25600; /// === FIN PASO 3 ======================================================== size_t vector_size_bytes_ = kNumElements * sizeof(float); std::cout << "[Vector addition of " << kNumElements << " elements]\n"; // Reservamos memoria para los vectores en el HOST float *h_A_ = (float *)malloc(vector_size_bytes_); float *h_B_ = (float *)malloc(vector_size_bytes_); float *h_C_ = (float *)malloc(vector_size_bytes_); // Comprobamos que las reservas se han efectuado correctamente if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL) { std::cerr << "Failed to allocate host vectors!\n"; getchar(); exit(-1); } // Inicializamos los vectores en el HOST con valores arbitrarios for (int i = 0; i < kNumElements; ++i) { h_A_[i] = rand()/(float)RAND_MAX; h_B_[i] = rand()/(float)RAND_MAX; } // Reservamos memoria para los vectores en el DEVICE float *d_A_ = NULL; float *d_B_ = NULL; float *d_C_ = NULL; hipMalloc((void **)&d_A_, vector_size_bytes_); hipMalloc((void **)&d_B_, vector_size_bytes_); hipMalloc((void **)&d_C_, vector_size_bytes_); // Copiamos los vectores A y B de HOST a DEVICE std::cout << "Copy input data from the host memory to the CUDA device\n"; hipMemcpy(d_A_, h_A_, vector_size_bytes_, hipMemcpyHostToDevice); hipMemcpy(d_B_, h_B_, vector_size_bytes_, hipMemcpyHostToDevice); // Lanzamos el kernel de suma de vectores y comprobamos errores /// === PASO 1 ============================================================ /// Establece los parámetros de invocación del kernel e invócalo. int threads_per_block_ = 256; int blocks_per_grid_ = ???; /// === PASO 3 ============================================================ /// Modifica el cálculo del tamaño del grid para que se puedan sumar /// vectores de un tamaño arbitrario. /// blocks_per_grid_ = ???; /// === FIN PASO 3 ======================================================== /// === PASO 4 ============================================================ /// Establece los parámetros de invocación del kernel modificado. /// blocks_per_grid_ = ???; /// === FIN PASO 4 ======================================================== dim3 block(threads_per_block_, 1, 1); dim3 grid(blocks_per_grid_, 1, 1); std::cout << "CUDA kernel launch with " << blocks_per_grid_ << " blocks of " << threads_per_block_ << " threads\n"; hipLaunchKernelGGL(( suma_vectores), dim3(???), dim3(???), 0, 0, ???); hipError_t err_ = hipGetLastError(); if (err_ != hipSuccess) { std::cerr << "Failed to launch sumaVectores kernel (error code " << hipGetErrorString(err_) << ")!\n"; getchar(); exit(-1); } /// === FIN PASO 1 ======================================================== // Copiamos el vector resultante del DEVICE al HOST std::cout << "Copy output data from the CUDA device to the host memory\n"; hipMemcpy(h_C_, d_C_, vector_size_bytes_, hipMemcpyDeviceToHost); // Verificamos el resultado for (int i = 0; i < kNumElements; ++i) { // Dado que utilizamos floats las comparaciones de igualdad fallarían // por el orden de las operaciones por lo que utilizamos una comparación // con un umbral 1e-5 if (fabs(h_A_[i] + h_B_[i] - h_C_[i]) > 1e-5) { std::cerr << "Result verification failed at element " << i << "!\n"; getchar(); exit(-1); } } std::cout << "Test PASSED\n"; // Liberamos la memoria en el DEVICE hipFree(d_A_); hipFree(d_B_); hipFree(d_C_); // Liberamos la memoria en el HOST free(h_A_); free(h_B_); free(h_C_); // Reiniciamos el dispositivo // hipDeviceReset hace que el driver limpie todo estado actual. Aunque no es // una operación obligatoria, es una buena práctica. 
Además, es necesaria si // estamos realizando profiling de la aplicación. hipDeviceReset(); // Finalizamos el programa std::cout << "Done\n"; getchar(); return 0; }
8a0cbc6e04934267544f6cc7859e31e81a176911.cu
/// --------------------------------------------------------------------------- /// CUDA Workshop 2018 /// Universidad de Alicante /// Práctica 0 - Suma de Vectores /// Código preparado por: Albert García <[email protected]> /// Sergio Orts <[email protected]> /// --------------------------------------------------------------------------- #include <iostream> // Cabecera necesaria para las rutinas del runtime de CUDA (cudaFree, cudaMalloc...) #include <cuda_runtime.h> // Cabecera necesaria para variables y tipos de CUDA... #include <device_launch_parameters.h> /// Kernel para suma de vectores. /// Este kernel computará la suma de dos vectores de forma que cada /// hilo será responsable de sumar un elemento de dichos vectores. __global__ void suma_vectores( const float *cpA, const float *cpB, float *pC, const int cNumElements) { /// === PASO 2 ============================================================ /// Define los índices del elemento a ser sumado por cada hilo empleando las /// variables de CUDA: threadIdx, blockIdx y blockDim. /// TODO: int idx_ = ???; /// Suma las dos posiciones en el vector de salida, cada hilo debe computar /// el cálculo de un elemento. /// TODO: /// === FIN PASO 2 ======================================================== } /// Kernel para suma de vectores con stride. /// Este kernel computará la suma de dos vectores de forma que cada /// hilo será responsable de sumar varios elementos de dichos vectores. __global__ void suma_vectores_strided( const float *cpA, const float *cpB, float *pC, const int cNumElements) { /// === PASO 4 ============================================================ /// Modifica el kernel anterior para que se puedan sumar vectores de un /// tamaño muy grande. Recuerda cambiar los parámetros de invocación y /// llamar a este kernel en lugar de al anterior. int idx_ = ???; /// Sumar las posiciones adecuadas en el vector de salida, cada hilo debe /// computar más de un elemento. /// TODO: /// === FINAL PASO 4 ====================================================== } int main(void) { // Elegimos la GPU a utilizar, en este caso la 0 cudaSetDevice(0); // Calculamos el tamaño en bytes del vector /// === PASO 3 ============================================================ /// Modifica el número de elementos a sumar. 
const int kNumElements = 25600; /// === FIN PASO 3 ======================================================== size_t vector_size_bytes_ = kNumElements * sizeof(float); std::cout << "[Vector addition of " << kNumElements << " elements]\n"; // Reservamos memoria para los vectores en el HOST float *h_A_ = (float *)malloc(vector_size_bytes_); float *h_B_ = (float *)malloc(vector_size_bytes_); float *h_C_ = (float *)malloc(vector_size_bytes_); // Comprobamos que las reservas se han efectuado correctamente if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL) { std::cerr << "Failed to allocate host vectors!\n"; getchar(); exit(-1); } // Inicializamos los vectores en el HOST con valores arbitrarios for (int i = 0; i < kNumElements; ++i) { h_A_[i] = rand()/(float)RAND_MAX; h_B_[i] = rand()/(float)RAND_MAX; } // Reservamos memoria para los vectores en el DEVICE float *d_A_ = NULL; float *d_B_ = NULL; float *d_C_ = NULL; cudaMalloc((void **)&d_A_, vector_size_bytes_); cudaMalloc((void **)&d_B_, vector_size_bytes_); cudaMalloc((void **)&d_C_, vector_size_bytes_); // Copiamos los vectores A y B de HOST a DEVICE std::cout << "Copy input data from the host memory to the CUDA device\n"; cudaMemcpy(d_A_, h_A_, vector_size_bytes_, cudaMemcpyHostToDevice); cudaMemcpy(d_B_, h_B_, vector_size_bytes_, cudaMemcpyHostToDevice); // Lanzamos el kernel de suma de vectores y comprobamos errores /// === PASO 1 ============================================================ /// Establece los parámetros de invocación del kernel e invócalo. int threads_per_block_ = 256; int blocks_per_grid_ = ???; /// === PASO 3 ============================================================ /// Modifica el cálculo del tamaño del grid para que se puedan sumar /// vectores de un tamaño arbitrario. /// blocks_per_grid_ = ???; /// === FIN PASO 3 ======================================================== /// === PASO 4 ============================================================ /// Establece los parámetros de invocación del kernel modificado. /// blocks_per_grid_ = ???; /// === FIN PASO 4 ======================================================== dim3 block(threads_per_block_, 1, 1); dim3 grid(blocks_per_grid_, 1, 1); std::cout << "CUDA kernel launch with " << blocks_per_grid_ << " blocks of " << threads_per_block_ << " threads\n"; suma_vectores<<<???, ???>>>(???); cudaError_t err_ = cudaGetLastError(); if (err_ != cudaSuccess) { std::cerr << "Failed to launch sumaVectores kernel (error code " << cudaGetErrorString(err_) << ")!\n"; getchar(); exit(-1); } /// === FIN PASO 1 ======================================================== // Copiamos el vector resultante del DEVICE al HOST std::cout << "Copy output data from the CUDA device to the host memory\n"; cudaMemcpy(h_C_, d_C_, vector_size_bytes_, cudaMemcpyDeviceToHost); // Verificamos el resultado for (int i = 0; i < kNumElements; ++i) { // Dado que utilizamos floats las comparaciones de igualdad fallarían // por el orden de las operaciones por lo que utilizamos una comparación // con un umbral 1e-5 if (fabs(h_A_[i] + h_B_[i] - h_C_[i]) > 1e-5) { std::cerr << "Result verification failed at element " << i << "!\n"; getchar(); exit(-1); } } std::cout << "Test PASSED\n"; // Liberamos la memoria en el DEVICE cudaFree(d_A_); cudaFree(d_B_); cudaFree(d_C_); // Liberamos la memoria en el HOST free(h_A_); free(h_B_); free(h_C_); // Reiniciamos el dispositivo // cudaDeviceReset hace que el driver limpie todo estado actual. Aunque no es // una operación obligatoria, es una buena práctica. 
Además, es necesaria si // estamos realizando profiling de la aplicación. cudaDeviceReset(); // Finalizamos el programa std::cout << "Done\n"; getchar(); return 0; }
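For readers following the exercise, one common way to fill in the PASO 1-4 TODOs above, shown purely as an illustration under the usual CUDA conventions and not as the workshop's official solution (the _ref suffixes mark these as hypothetical variants, not names from the handout):
// PASO 2: one thread per element, with a guard for the rounded-up tail of the grid.
__global__ void suma_vectores_ref(const float *A, const float *B, float *C, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // global index of this thread
    if (idx < n) C[idx] = A[idx] + B[idx];
}
// PASO 4: grid-stride loop, so a fixed-size grid covers arbitrarily large vectors.
__global__ void suma_vectores_strided_ref(const float *A, const float *B, float *C, int n)
{
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n;
         idx += blockDim.x * gridDim.x)
        C[idx] = A[idx] + B[idx];
}
// PASO 1/3 host side: round the block count up before launching, e.g.
// int blocks_per_grid_ = (kNumElements + threads_per_block_ - 1) / threads_per_block_;
// suma_vectores_ref<<<blocks_per_grid_, threads_per_block_>>>(d_A_, d_B_, d_C_, kNumElements);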
d9a0f59d974205c8404d0312265eb8d50fa69f34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <tuple> #include <type_traits> #include "dali/core/backend_tags.h" #include "dali/core/convert.h" #include "dali/core/error_handling.h" #include "dali/core/fast_div.h" #include "dali/core/float16.h" #include "dali/core/permute.h" #include "dali/core/static_switch.h" #include "dali/kernels/slice/slice_hwc2chw_normalize_gpu.h" namespace dali { namespace kernels { namespace slice_flip_normalize { template <typename Out, typename In> struct Hwc2HwcChwSampleDesc { Out *__restrict__ out; const In *__restrict__ in; const float *__restrict__ norm_add; const float *__restrict__ norm_mul; const Out *__restrict__ fill_values; int64_t sample_size; uint32_t first_block; // Dimensions of the output int H, W, C; // Dimensions of the input (relevant to input stride) int input_W, input_C; bool flip_x; }; // TODO(klecki): Generalize the utility for binsearch indexing of thread blocks with Cast kernel. inline __device__ uint32_t FindSampleIdx(const uint32_t *first_blocks, uint32_t num_samples) { uint32_t i = 0; for (uint32_t jump = (1 << (32 - __clz(num_samples) - 1)); jump; jump >>= 1) { if (i + jump < num_samples && first_blocks[i + jump] <= blockIdx.x) i += jump; } return i; } /** @defgroup Hwc2HwcChwLoad Data loading for slice Hwc2{Hwc,Chw} Normalize Mirror-x Pad-channel * kernel Load the data from linear chunk of HWC u8 image into a tile in shared memory. The loading * loop consists of three stages: * 1. Prologue - read from the start of the tile to the address that is multiple of 4 byte alignment * 2. Main loop - read most of the tile via uchar4, utilizing 4-byte read instructions. * 3. Epilogue - read the remainder of data that is not covered by the two previous loops. * * The slicing variant is addressed reads only the values required by the output, proceeding * row by row, using the same pattern as above for each row. * Samples are adjusted so that rows slices start at 0, and only the end of row is sliced. * @{ */ /** * @brief Load the linear tile into linear smem buffer. * * @tparam kBlockSize Tile size * @tparam kStaticChannels Number of input channels * @tparam Tile Type of the data kept after loading in the smem tile. * @tparam Out Output data type * @tparam In Input data type * @tparam kLoadAlign - Alignment (in bytes) of the main loop. The access to smem is also aligned * to this value, so depending on the prologue length, the data after loading may not start * at the tile[0]. The start of actual data is returned. * The smem tile must hold at least kBlockSize + kLoadAlign elements. * @param tile Shared memory where to load the data. * @param sample Sample description * @return Tile * - the pointer to the smem where the start of the loaded data is. 
*/ template <int kBlockSize, int kStaticChannels, typename Tile, typename Out, typename In, int kLoadAlign = 32 * 4> __device__ __forceinline__ Tile *load_linear_tile(Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { static_assert(std::is_same_v<In, uint8_t>, "Only uint8_t types allowed now."); static_assert(kStaticChannels == 3, "Only 3 input channels allowed now."); static_assert(kLoadAlign % 4 == 0, "The loading alignment should be divisible by 4."); int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); auto in_start = reinterpret_cast<std::uintptr_t>(sample.in + start_x); auto aligned_in_start = align_up(in_start, kLoadAlign); // In case if end_x - start_x < kLoadAlign, we never get to the aligned main loop uint32_t bytes_to_alignment = ::min(aligned_in_start - in_start, end_x - start_x); Tile *aligned_tile = tile + kLoadAlign; Tile *prologue_tile = aligned_tile - bytes_to_alignment; const In *prologue_in = sample.in + start_x; const uchar4 *aligned_in_uchar4 = reinterpret_cast<const uchar4 *>(sample.in + start_x + bytes_to_alignment); // prologue for (uint32_t idx = threadIdx.x; idx < bytes_to_alignment; idx += blockDim.x) { prologue_tile[idx] = prologue_in[idx]; } // this might be 0, as the prologue may be the full extend of the tile uint32_t left_after_prologue = end_x - start_x - bytes_to_alignment; // We read 4 values in each iteration uint32_t main_loop_length = left_after_prologue >> 2; // main loop: aligned load for (uint32_t idx = threadIdx.x; idx < main_loop_length; idx += blockDim.x) { uchar4 in = aligned_in_uchar4[idx]; aligned_tile[idx * 4 + 0] = in.x; aligned_tile[idx * 4 + 1] = in.y; aligned_tile[idx * 4 + 2] = in.z; aligned_tile[idx * 4 + 3] = in.w; } uint32_t processed_in_main = left_after_prologue & -4; // equivalent to (x / 4) * 4 uint32_t left_after_main = left_after_prologue - processed_in_main; // epilogue Tile *epilogue_tile = aligned_tile + processed_in_main; const In *epilogue_in = reinterpret_cast<const In *>(aligned_in_uchar4 + main_loop_length); for (uint32_t idx = threadIdx.x; idx < left_after_main; idx++) { epilogue_tile[idx] = epilogue_in[idx]; } // Return the start of the tile return prologue_tile; } /** * @brief Load the slices of linear tile into linear smem buffer. * * The kernel proceeds row-by-row, reading the output width elements/pixels, skipping the remaining * input_width - output_width pixels. * * @tparam kBlockSize Tile size * @tparam kStaticChannels Number of input channels * @tparam Tile Type of the data kept after loading in the smem tile. * @tparam Out Output data type * @tparam In Input data type * @tparam kLoadAlign - Alignment (in bytes) of the main loop. * The smem tile must hold at least kBlockSize + kLoadAlign elements. * @param tile Shared memory where to load the data. * @param sample Sample description * @return Tile * - the pointer to the smem where the start of the loaded data is. 
*/ template <int kBlockSize, int kStaticChannels, typename Tile, typename Out, typename In, int kLoadAlign = 4> __device__ __forceinline__ Tile *slice_load_linear_tile( Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { static_assert(std::is_same_v<In, uint8_t>, "Only uint8_t types allowed now."); static_assert(kStaticChannels == 3, "Only 3 input channels allowed now."); static_assert(kLoadAlign % 4 == 0, "The loading alignment should be divisible by 4."); int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); // Strides use the input number of channels without the padding int in_stride = sample.input_W * sample.input_C; // this is intermediate stride, as if we were never padding the data, // so it is useful for filling the linear tile, keeping the xy offset int tile_stride = sample.W * sample.input_C; // The rows we start and end with, we are indexed by output coordinates int y_start = start_x / tile_stride; int y_end = end_x / tile_stride + 1; Tile *tile_row = tile; for (int y = y_start; y < y_end; y++) { int xc_start, xc_end; // The first row doesn't start with 0 due to tiling, the rest do. if (y == y_start) { xc_start = start_x - y_start * tile_stride; } else { xc_start = 0; } // Similarly for the end of row for last row if (y == y_end - 1) { xc_end = end_x - (y_end - 1) * tile_stride; } else { xc_end = tile_stride; } const In *prologue_in = sample.in + y * in_stride + xc_start; auto in_start = reinterpret_cast<std::uintptr_t>(prologue_in); // align to 4 auto aligned_in_start = align_up(in_start, kLoadAlign); uint32_t bytes_to_alignment = ::min(static_cast<int32_t>(aligned_in_start - in_start), xc_end - xc_start); Tile *prologue_tile = tile_row; Tile *aligned_tile = tile_row + bytes_to_alignment; const uchar4 *aligned_in_uchar4 = reinterpret_cast<const uchar4 *>(prologue_in + bytes_to_alignment); // prologue for (uint32_t idx = threadIdx.x; idx < bytes_to_alignment; idx += blockDim.x) { prologue_tile[idx] = prologue_in[idx]; } // this might be 0, as the prologue may be the full extend of the tile uint32_t left_after_prologue = xc_end - xc_start - bytes_to_alignment; // We read 4 values in each iteration uint32_t main_loop_length = left_after_prologue >> 2; // aligned load for (uint32_t idx = threadIdx.x; idx < main_loop_length; idx += blockDim.x) { uchar4 in = aligned_in_uchar4[idx]; aligned_tile[idx * 4 + 0] = in.x; aligned_tile[idx * 4 + 1] = in.y; aligned_tile[idx * 4 + 2] = in.z; aligned_tile[idx * 4 + 3] = in.w; } uint32_t processed_in_main = left_after_prologue & -4; // equivalent to (x / 4) * 4 uint32_t left_after_main = left_after_prologue - processed_in_main; // epilogue Tile *epilogue_tile = aligned_tile + processed_in_main; const In *epilogue_in = reinterpret_cast<const In *>(aligned_in_uchar4 + main_loop_length); for (uint32_t idx = threadIdx.x; idx < left_after_main; idx++) { epilogue_tile[idx] = epilogue_in[idx]; } tile_row += (xc_end - xc_start); } return tile; } /** * @brief Load the slices of linear tile into planar smem buffers. * * During the loading the values are distributed into separate planes in smem (keeping the same * sequential XY coordinates/offsets). Allows for faster access when building padded HWC output. * Each smem plane must hold kBlockSize / kStaticChannels elements. * * @tparam kBlockSize Tile size * @tparam kStaticChannels Number of input channels * @tparam Tile Type of the data kept after loading in the smem tile. 
* @tparam Out Output data type * @tparam In Input data type * @tparam kLoadAlign - Alignment (in bytes) of the main loop. * @param tile Shared memory where to load the data. * @param sample Sample description * @return Tile * - the pointer to the smem where the start of the loaded data is. */ template <int kBlockSize, int kStaticChannels, typename Tile, typename Out, typename In, int kLoadAlign = 4> __device__ __forceinline__ void load_planar_tile(Tile tile[][kBlockSize / kStaticChannels], const Hwc2HwcChwSampleDesc<Out, In> sample) { static_assert(std::is_same_v<In, uint8_t>, "Only uint8_t types allowed now."); static_assert(kStaticChannels == 3, "Only 3 input channels allowed now."); static_assert(kLoadAlign % 4 == 0, "The loading alignment should be divisible by 4."); int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); auto in_start = reinterpret_cast<std::uintptr_t>(sample.in + start_x); auto aligned_in_start = align_up(in_start, kLoadAlign); uint32_t bytes_to_alignment = ::min(aligned_in_start - in_start, end_x - start_x); const In *prologue_in = sample.in + start_x; const uchar4 *aligned_in_char4 = reinterpret_cast<const uchar4 *>(sample.in + start_x + bytes_to_alignment); // The tiles are multiple of 3, so we are always reading from the start of the pixel. fast_div<uint32_t> channel(kStaticChannels); // prologue for (uint32_t idx = threadIdx.x; idx < bytes_to_alignment; idx += blockDim.x) { uint32_t xy, c; xy = div_mod(c, idx, channel); tile[c][xy] = prologue_in[idx]; } // this might be 0, as the prologue may be the full extend of the tile uint32_t left_after_prologue = end_x - start_x - bytes_to_alignment; // We read 4 values in each iteration uint32_t main_loop_length = left_after_prologue >> 2; // main loop: aligned load and unpacking for (uint32_t idx = threadIdx.x; idx < main_loop_length; idx += blockDim.x) { uint32_t flat_idx = idx * 4 + bytes_to_alignment; uint32_t xy, c; xy = div_mod(c, flat_idx, channel); uchar4 in = aligned_in_char4[idx]; tile[c][xy] = in.x; c++; if (c == kStaticChannels) { c = 0; xy++; } tile[c][xy] = in.y; c++; if (c == kStaticChannels) { c = 0; xy++; } tile[c][xy] = in.z; c++; if (c == kStaticChannels) { c = 0; xy++; } tile[c][xy] = in.w; } uint32_t processed_in_main = left_after_prologue & -4; // equivalent to (x / 4) * 4 uint32_t left_after_main = left_after_prologue - processed_in_main; // epilogue const In *epilogue_in = reinterpret_cast<const In *>(aligned_in_char4 + main_loop_length); for (uint32_t idx = threadIdx.x; idx < left_after_main; idx++) { uint32_t flat_idx = processed_in_main + bytes_to_alignment + idx; uint32_t xy, c; xy = div_mod(c, flat_idx, channel); tile[c][xy] = epilogue_in[idx]; } } /** @} */ // end of Hwc2HwcChwLoad /** @defgroup Hwc2HwcChwStore Data storing for slice Hwc2{Hwc,Chw} Normalize Mirror-x Pad-channel * kernel * @{ */ /** * @brief Calculate the planar output offset to take optional mirroring into account. 
*/ template <bool enable_mirror, typename Out, typename In> __device__ __forceinline__ int64_t calculate_offset_chw(int64_t planar_idx, const Hwc2HwcChwSampleDesc<Out, In> sample) { if constexpr (enable_mirror) { if (sample.flip_x) { int y = planar_idx / sample.W; int x = planar_idx - (int64_t)y * sample.W; int target_x = sample.W - 1 - x; return (int64_t)y * sample.W + target_x; } } return planar_idx; } template <int kBlockSize, int kStaticChannels, bool enable_mirror, bool enable_pad, typename Compute = float, typename Tile, typename Out, typename In> __device__ __forceinline__ void store_chw(Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); const auto *__restrict__ fill_values = static_cast<const Out *>(sample.fill_values); // Preload the norm values so they are accessed via registers and not from gmem via pointer. Compute norm_mul[kStaticChannels], norm_add[kStaticChannels]; #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { norm_mul[c] = sample.norm_mul[c]; norm_add[c] = sample.norm_add[c]; } // idx is not divided by the static channels (mostly the start_x) for (int64_t idx = threadIdx.x + start_x / kStaticChannels, base_x = threadIdx.x; idx < end_x / kStaticChannels; idx += blockDim.x, base_x += blockDim.x) { int64_t out_offset = calculate_offset_chw<enable_mirror>(idx, sample); #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { // the kStaticChannels == input_C Compute fpin = tile[base_x * sample.input_C + c]; Compute fpout = fmaf(fpin, norm_mul[c], norm_add[c]); sample.out[c * sample.H * sample.W + out_offset] = ConvertSat<Out>(fpout); } if constexpr (enable_pad) { for (int c = kStaticChannels; c < sample.C; c++) { sample.out[c * sample.H * sample.W + out_offset] = fill_values[c]; } } } } template <int kOutChannels> __device__ __forceinline__ int divide_by_channel(int xc) { if constexpr (kOutChannels == 3) { return xc / kOutChannels; } return xc >> 2; } /** * @brief Calculate the flat output offset for interleaved images to take optional mirroring into * account. */ template <bool enable_mirror, bool enable_pad, typename Out, typename In> __device__ __forceinline__ int64_t calculate_offset_hwc(int64_t flat_idx, int c, const Hwc2HwcChwSampleDesc<Out, In> sample) { constexpr int kOutChannels = enable_pad ? 4 : 3; if constexpr (enable_mirror) { if (sample.flip_x) { int y = flat_idx / (sample.W * kOutChannels); int xc = flat_idx - (int64_t)y * sample.W * kOutChannels; int x = divide_by_channel<kOutChannels>(xc); int target_x = sample.W - 1 - x; return (int64_t)y * sample.W * kOutChannels + target_x * kOutChannels + c; } } return flat_idx; } // TODO(klecki): Prepare a generic version that supports the planar layout in smem and evaluate. template <int kBlockSize, int kStaticChannels, bool enable_mirror, bool enable_pad, typename Compute, typename Tile, typename Out, typename In> __device__ __forceinline__ void store_hwc(Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); const auto *__restrict__ fill_values = static_cast<const Out *>(sample.fill_values); // Preload the norm values so they are accessed via registers and not from gmem via pointer. 
Compute norm_mul[kStaticChannels], norm_add[kStaticChannels]; #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { norm_mul[c] = sample.norm_mul[c]; norm_add[c] = sample.norm_add[c]; } // Assuming all samples are padded if constexpr (enable_pad) { constexpr int kOutChannels = kStaticChannels + 1; int64_t block_4 = (kBlockSize / kStaticChannels) * kOutChannels; int64_t sample_size_4 = (sample.sample_size / kStaticChannels) * kOutChannels; int64_t start_x_padded = static_cast<int64_t>(blockIdx.x - sample.first_block) * block_4; int64_t end_x_padded = ::min(start_x_padded + block_4, sample_size_4); for (int64_t idx = threadIdx.x + start_x_padded, base_x = threadIdx.x; idx < end_x_padded; idx += blockDim.x, base_x += blockDim.x) { int base_offset = base_x >> 2; int c = idx & 3; int64_t out_offset = calculate_offset_hwc<enable_mirror, enable_pad>(idx, c, sample); if (c < kStaticChannels) { Compute fpin = tile[base_offset * sample.input_C + c]; Compute fpout = fma(fpin, norm_mul[c], norm_add[c]); sample.out[out_offset] = ConvertSat<Out>(fpout); } else { sample.out[out_offset] = fill_values[c]; } } } else { // No padding, we just with the same offset (or mirrored x offset) fast_div<uint32_t> channels(kStaticChannels); for (int64_t idx = threadIdx.x + start_x, base_x = threadIdx.x; idx < end_x; idx += blockDim.x, base_x += blockDim.x) { int c = idx % channels; int64_t out_offset = calculate_offset_hwc<enable_mirror, enable_pad>(idx, c, sample); Compute fpin = tile[base_x]; Compute fpout = fma(fpin, norm_mul[c], norm_add[c]); sample.out[out_offset] = ConvertSat<Out>(fpout); } } } /** * @brief Store a tile of smem that is kept as planes in the HWC format. * * This version is specialized for uint8_t inputs and fp16 outputs + padding from 3 to 4 channels. * The output samples are expected to be aligned to at least 4-bytes allowing for vectorized * stores of __half2. * @tparam Compute Type to conduct computations in. * TODO(klecki): vectorized __half2 can be considered, float is ok. * @tparam Tile smem tile storage type */ template <int kBlockSize, int kStaticChannels, bool enable_mirror, typename Compute, typename Tile> __device__ __forceinline__ void store_planar_hwc_pad( Tile tile[][kBlockSize / kStaticChannels], const Hwc2HwcChwSampleDesc<float16, uint8_t> sample) { constexpr int kOutChannels = kStaticChannels + 1; int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); const auto *__restrict__ fill_values = static_cast<const float16 *>(sample.fill_values); // Preload the norm values so they are accessed via registers and not from gmem via pointer. Compute norm_mul[kOutChannels], norm_add[kOutChannels]; #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { norm_mul[c] = sample.norm_mul[c]; norm_add[c] = sample.norm_add[c]; } // put the fill value so it will be produced as a result of FMA norm_mul[3] = 0; norm_add[3] = sample.fill_values[3]; // Assuming all samples are padded int64_t block_4 = (kBlockSize / kStaticChannels) * kOutChannels; int64_t sample_size_4 = (sample.sample_size / kStaticChannels) * kOutChannels; int64_t start_x_padded = static_cast<int64_t>(blockIdx.x - sample.first_block) * block_4; int64_t end_x_padded = ::min(start_x_padded + block_4, sample_size_4); // TODO(klecki) in the version without mirror, we can keep one offset, as we can start the // output pointer at the output tile. 
auto *out_aligned = sample.out; auto *out_h2 = reinterpret_cast<__half2 *>(sample.out); uint32_t to_write = end_x_padded - start_x_padded; // loop is divided by two as we write two elements in each thread for (uint32_t base_x = threadIdx.x; base_x < to_write / 2; base_x += blockDim.x) { int base_offset = base_x / 2; int c = base_x & 1; int64_t out_offset; if constexpr (enable_mirror) { if (sample.flip_x) { int64_t idx = start_x_padded + base_x * 2; int y = idx / (sample.W * kOutChannels); int xc = idx - (int64_t)y * sample.W * kOutChannels; int x = xc / kOutChannels; int target_x = sample.W - 1 - x; // basically we divide the out_offset by two, The `c` is either 0 or 1. out_offset = (int64_t)y * sample.W * (kOutChannels / 2) + target_x * (kOutChannels / 2) + c; } else { out_offset = start_x_padded / 2 + base_x; } } else { out_offset = start_x_padded / 2 + base_x; } if (c == 0) { Compute fpin0 = tile[0][base_offset]; Compute fpin1 = tile[1][base_offset]; Compute fpout0 = fmaf(fpin0, norm_mul[0], norm_add[0]); Compute fpout1 = fmaf(fpin1, norm_mul[1], norm_add[1]); out_h2[out_offset] = make_half2(ConvertSat<float16>(fpout0), ConvertSat<float16>(fpout1)); } else { Compute fpin0 = tile[2][base_offset]; Compute fpout0 = fmaf(fpin0, norm_mul[2], norm_add[2]); // With more generic implementation, we could do the FMA for this value as well, but we // need to just pad it here. Compute fpout1 = norm_add[3]; out_h2[out_offset] = make_half2(ConvertSat<float16>(fpout0), ConvertSat<float16>(fpout1)); } } } /** @} */ // end of Hwc2HwcChwStore /** @defgroup Hwc2HwcChw The Slice Hwc2{Hwc,Chw} Normalize Mirror-x Pad-channel kernel * * Kernel that reads a HWC u8 image and outputs a HWC or CHW normalized float image, that can be * cropped in Y, X coordinates, mirrored in X coordinate, and the channels can be padded. * * High level structure of the kernel: * 1. Load tile of linear data from the image into shared memory, doing a cast to floating type. * a. Note, that the tile in shared memory can be represented either as an linear chunk with * interleaved channels or as separate channel planes. See the loading functions for details. * b. Each thread in loader loop maps to one value of the loaded image. * c. Tile in shared memory doesn't take the padded channels into account, it stores only the * input channels. * 2. Synchronize * 3. Output the data in correct layout, reading from the shared memory. * a. For CHW output each thread corresponds to a (Y, X) sequential offset into a plane, computes * the values for all the channels and writes them. Assuming 3-channel input, we can look * at the input as a sequential stream of values, where we distribute them (sequentially) * into 3 output planes. * b. Padding the output channels for CHW is done by filling additional planes with fill values. * c. For HWC output, in the simples case we can store the linear tile in the same order * as it was read. In case of padding, fill values must be inserted. * d. Mirroring is done by swapping the X-coordinate and recomputing the target offset for both * layouts. * * The kernel use a thread block size, that is divisible both by channel number: 3 (for the * non-padded output loop), and 4 (alignment for input loop and padded output loop). * * For better throughput, the read and write accesses to global memory are sequential, * using aligned 4-byte-wide access when possible. 
* @{ */ // TODO(klecki): generalize for wider input types /** * @brief Hwc2HwcChw Normalize Mirror-x Pad-channel kernel * This kernel does not support cropping the x coordinate, so the reads are fully linear. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void Hwc2HwcChwNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_chw<kBlockSize, kStaticChannels, enable_mirror, enable_pad>(loaded_tile, sample); } /** * @brief Slice Hwc2HwcChw Normalize [Mirror-x] [Pad-channel] kernel * This kernel supports cropping in x-coordinate. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void SliceHwc2HwcChwNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = slice_load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_chw<kBlockSize, kStaticChannels, enable_mirror, enable_pad>(loaded_tile, sample); } /** * @brief Hwc2Hwc Normalize [Mirror-x] [Pad-channel] kernel * This kernel does not support cropping the x coordinate, so the reads are fully linear. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void Hwc2HwcNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_hwc<kBlockSize, kStaticChannels, enable_mirror, enable_pad, Out>(loaded_tile, sample); } /** * @brief Slice Hwc2Hwc Normalize [Mirror-x] [Pad-channel] kernel * This kernel supports cropping in x-coordinate. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void SliceHwc2HwcNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = slice_load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_hwc<kBlockSize, kStaticChannels, enable_mirror, enable_pad, Out>(loaded_tile, sample); } /** * @brief Hwc2Hwc Normalize [Mirror-x] Pad-channel-always kernel for FP16. * * This kernel utilizes 4-byte reads and writes. The smem intermediate tile uses planar layout, * for better access to the image values during writing of the output. 
* The output samples are assumed to be aligned to the address that is multiple of 4, * thanks to the padding performed to 4 channels, it holds for every batch that is laid out * contiguously in memory with aligned start. This holds for forseeable future in DALI. */ template <typename Out, typename In, bool enable_mirror, int kBlockSize, int kStaticChannels> __global__ void Hwc2HwcNormalizePadFp16(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); constexpr int kOutChannels = kStaticChannels + 1; int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kStaticChannels][kBlockSize / kStaticChannels]; load_planar_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_planar_hwc_pad<kBlockSize, kStaticChannels, enable_mirror, float>(tile, sample); } /** @} */ // end of Hwc2HwcChw template <typename Out> KernelRequirements SliceHwc2HwcChwNormalizeGPU<Out>::Setup(KernelContext &ctx, const TensorListShape<ndim> &input_shape, span<const SampleArgs> args, TensorLayout output_layout) { (void)ctx; int num_samples = input_shape.num_samples(); DALI_ENFORCE(num_samples == static_cast<int>(args.size()), "Invalid number of samples in kernel args"); out_shape_ = TensorListShape<ndim>(num_samples, ndim); collapsed_tiling_shape_ = TensorListShape<1>(num_samples, 1); perm_ = output_layout == "HWC" ? std::array<int, 3>{0, 1, 2} : std::array<int, 3>{2, 0, 1}; output_layout_ = output_layout; SetupNumChannels(input_shape, args); DALI_ENFORCE(output_layout == "HWC" || output_layout == "CHW", "Only CHW and HWC output layouts allowed"); for (int i = 0; i < num_samples; i++) { // N.B. 
this function produces a HWC shape, that's why we need the permute auto out_sample_shape = ShapeFromRoi(args[i].roi, out_nchannels_); for (int d = 0; d < spatial_dim; d++) { DALI_ENFORCE(out_sample_shape[d] <= input_shape.tensor_shape_span(i)[d], make_string("Only cropping allowed, got a request for padding in dimension `", d, "` of sample ", i, ".")); } out_sample_shape = permute(out_sample_shape, perm_); out_shape_.set_tensor_shape(i, out_sample_shape); collapsed_tiling_shape_.set_tensor_shape(i, {volume(args[i].roi) * nchannels_}); } KernelRequirements req; req.output_shapes = {out_shape_}; return req; } template <typename Out> std::tuple<float *, float *, Out *> SliceHwc2HwcChwNormalizeGPU<Out>::SetupParams( KernelContext &ctx, span<const SampleArgs> args) { int num_samples = args.size(); float *norm_add_cpu = ctx.scratchpad->AllocatePinned<float>(num_samples * nchannels_); float *norm_mul_cpu = ctx.scratchpad->AllocatePinned<float>(num_samples * nchannels_); Out *fill_values_cpu = ctx.scratchpad->AllocatePinned<Out>(num_samples * out_nchannels_); for (int i = 0; i < num_samples; i++) { const auto &sample_arg = args[i]; auto *norm_add_data = norm_add_cpu + i * nchannels_; auto *norm_mul_data = norm_mul_cpu + i * nchannels_; int mean_sz = sample_arg.mean.size(); assert(mean_sz == sample_arg.inv_stddev.size()); int c = 0; for (; c < mean_sz; c++) { norm_add_data[c] = -sample_arg.mean[c] * sample_arg.inv_stddev[c]; norm_mul_data[c] = sample_arg.inv_stddev[c]; } for (; c < nchannels_; c++) { norm_add_data[c] = 0.0f; norm_mul_data[c] = 1.0f; } auto *fill_values_data = fill_values_cpu + i * out_nchannels_; int fill_values_sz = sample_arg.fill_values.size(); c = 0; for (; c < fill_values_sz; c++) fill_values_data[c] = ConvertSat<Out>(sample_arg.fill_values[c]); for (; c < out_nchannels_; c++) fill_values_data[c] = ConvertSat<Out>(0.0f); } return ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, make_span(norm_add_cpu, num_samples * nchannels_), make_span(norm_mul_cpu, num_samples * nchannels_), make_span(fill_values_cpu, num_samples * out_nchannels_)); } template <typename Out> auto SliceHwc2HwcChwNormalizeGPU<Out>::RealignSample( TensorView<StorageGPU, const In, ndim> in_sample, Roi<spatial_dim> roi) -> std::tuple<TensorView<StorageGPU, const In, ndim>, Roi<spatial_dim>> { const auto *data = in_sample.data; auto shape = in_sample.shape; // skip the cropped rows data += roi.lo.y * shape[1] * shape[2]; shape[0] = roi.extent().y; // skip the cropped columns data += roi.lo.x * shape[2]; return {TensorView<StorageGPU, const In, ndim>{data, shape}, {ivec<spatial_dim>{0}, roi.extent()}}; } template <typename Out> void SliceHwc2HwcChwNormalizeGPU<Out>::SetupNumChannels(const TensorListShape<ndim> &input_shape, span<const SampleArgs> args) { if (input_shape.num_samples() == 0) { return; } const auto first_shape = input_shape.tensor_shape_span(0); nchannels_ = first_shape[channel_dim]; for (int i = 1; i < input_shape.num_samples(); i++) { int ch = input_shape.tensor_shape_span(i)[channel_dim]; DALI_ENFORCE(nchannels_ == ch, make_string("All samples should have the same number of channels, expected ", nchannels_, " channels, got ", ch, " channels in sample ", i)); } DALI_ENFORCE( input_shape.num_samples() == static_cast<int>(args.size()), "Number of samples in the arguments should match the number of samples in the shape."); out_nchannels_ = ::max(nchannels_, static_cast<int>(args[0].fill_values.size())); for (int i = 1; i < input_shape.num_samples(); i++) { DALI_ENFORCE(args[i].fill_values.size() == 
args[0].fill_values.size(), "All sample arguments should have the same number of fill values."); } DALI_ENFORCE(nchannels_ == kStaticChannels, "Only 3 input channels are supported."); if (output_layout_ == "HWC") { // Padding in the operator cannot go higher than the closest power of 2, // but better have the check in place. DALI_ENFORCE(out_nchannels_ == kStaticChannels || out_nchannels_ == kStaticChannels + 1, "Only 3 or 4 output channels are supported for HWC output layout."); } } template <typename Out> void SliceHwc2HwcChwNormalizeGPU<Out>::Run(KernelContext &ctx, const TensorListView<StorageGPU, Out, ndim> &out, const TensorListView<StorageGPU, const In, ndim> &in, span<const SampleArgs> args) { using SampleDesc = Hwc2HwcChwSampleDesc<Out, In>; int num_samples = in.num_samples(); SampleDesc *sample_descs_cpu = ctx.scratchpad->AllocatePinned<SampleDesc>(num_samples); uint32_t *first_blocks_cpu = ctx.scratchpad->AllocatePinned<uint32_t>(num_samples); auto [norm_add_gpu, norm_mul_gpu, fill_values_gpu] = SetupParams(ctx, args); bool need_pad = out_nchannels_ != nchannels_; bool need_crop_x = false; bool need_flip_x = false; // Check if all the outputs are aligned to 4 bytes, used by the specialized FP16 PAD HWC -> HWC // implementation. With the current state of DALI, the start of output allocation is aligned // (to even higher power of two), and all the samples have length that is multiple of 4 (padded to // 4 channels), that is if they are in contiguous allocation, all output samples are still aligned // to a multiple of 4. bool outputs_aligned_4 = true; uint32_t offset_blk = 0; int nonempty_samples = 0; for (int sample_id = 0; sample_id < num_samples; sample_id++) { auto [in_sample, in_roi] = RealignSample(in[sample_id], args[sample_id].roi); // we adjusted the in_roi to start from 0, so roi.extent() == roi.hi if (in_sample.shape[1] != in_roi.hi.x) { need_crop_x = true; } int64_t sample_size = collapsed_tiling_shape_[sample_id][0]; if (sample_size == 0) { continue; } auto &sample_desc = sample_descs_cpu[nonempty_samples]; auto &first_block = first_blocks_cpu[nonempty_samples++]; sample_desc.in = in_sample.data; sample_desc.out = out.tensor_data(sample_id); if (reinterpret_cast<std::uintptr_t>(sample_desc.out) % 4) { outputs_aligned_4 = false; } first_block = offset_blk; sample_desc.first_block = offset_blk; sample_desc.sample_size = sample_size; offset_blk += div_ceil(sample_size, kBlockSizeMul * kBlockWidth); // The output shape here is after the permutation if (output_layout_ == "CHW") { sample_desc.H = out.tensor_shape(sample_id)[1]; sample_desc.W = out.tensor_shape(sample_id)[2]; sample_desc.C = out.tensor_shape(sample_id)[0]; // out_nchannels_ } else { sample_desc.H = out.tensor_shape(sample_id)[0]; sample_desc.W = out.tensor_shape(sample_id)[1]; sample_desc.C = out.tensor_shape(sample_id)[2]; // out_nchannels_ } sample_desc.input_W = in_sample.shape[1]; sample_desc.input_C = in_sample.shape[2]; // nchannels_ sample_desc.norm_add = norm_add_gpu + sample_id * nchannels_; sample_desc.norm_mul = norm_mul_gpu + sample_id * nchannels_; sample_desc.fill_values = fill_values_gpu + sample_id * out_nchannels_; sample_desc.flip_x = args[sample_id].flip_x; if (args[sample_id].flip_x) { need_flip_x = true; } } if (nonempty_samples == 0) return; auto [sample_descs_gpu, first_blocks_gpu] = ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, make_span(sample_descs_cpu, nonempty_samples), make_span(first_blocks_cpu, nonempty_samples)); // TODO(klecki): Maybe this selection can be simplified, but 
making the output layout // a parameter would probably make it even less readable. // This version allows utilizing specialized implementations for every layout more easily. if (output_layout_ == "CHW") { auto dispatch = [samples = sample_descs_gpu, blocks = first_blocks_gpu, &ctx, need_crop_x, offset_blk, nonempty_samples](auto pad_v, auto flip_x_v) { if (need_crop_x) { hipLaunchKernelGGL(( SliceHwc2HwcChwNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels>) , dim3(offset_blk), dim3(kThreadBlockSize), 0, ctx.gpu.stream, samples, blocks, nonempty_samples); } else { hipLaunchKernelGGL(( Hwc2HwcChwNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels>), dim3(offset_blk), dim3(kThreadBlockSize), 0, ctx.gpu.stream, samples, blocks, nonempty_samples); } }; auto dispatch_flip = [&](auto pad_v, bool flip_x) { if (flip_x) { dispatch(pad_v, std::true_type{}); } else { dispatch(pad_v, std::false_type{}); } }; if (need_pad) { dispatch_flip(std::true_type{}, need_flip_x); } else { dispatch_flip(std::false_type{}, need_flip_x); } } else { auto dispatch = [samples = sample_descs_gpu, blocks = first_blocks_gpu, &ctx, need_crop_x, offset_blk, nonempty_samples](auto pad_v, auto flip_x_v, auto out_aligned_v) { if (need_crop_x) { hipLaunchKernelGGL(( SliceHwc2HwcNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels>), dim3(offset_blk), dim3(kThreadBlockSize), 0, ctx.gpu.stream, samples, blocks, nonempty_samples); } else { if constexpr (std::is_same_v<Out, float16> && pad_v.value && out_aligned_v.value) { hipLaunchKernelGGL(( Hwc2HwcNormalizePadFp16<Out, In, flip_x_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels>) , dim3(offset_blk), dim3(kThreadBlockSize), 0, ctx.gpu.stream, samples, blocks, nonempty_samples); } else { hipLaunchKernelGGL(( Hwc2HwcNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels>), dim3(offset_blk), dim3(kThreadBlockSize), 0, ctx.gpu.stream, samples, blocks, nonempty_samples); } } }; auto dispatch_aligned = [&](auto pad_v, auto flip_x_v, bool out_aligned) { if (out_aligned) { dispatch(pad_v, flip_x_v, std::true_type{}); } else { dispatch(pad_v, flip_x_v, std::false_type{}); } }; auto dispatch_flip = [&](auto pad_v, bool flip_x, bool out_aligned) { if (flip_x) { dispatch_aligned(pad_v, std::true_type{}, out_aligned); } else { dispatch_aligned(pad_v, std::false_type{}, out_aligned); } }; if (need_pad) { dispatch_flip(std::true_type{}, need_flip_x, outputs_aligned_4); } else { dispatch_flip(std::false_type{}, need_flip_x, outputs_aligned_4); } } CUDA_CALL(hipGetLastError()); } template class DLL_PUBLIC SliceHwc2HwcChwNormalizeGPU<float>; template class DLL_PUBLIC SliceHwc2HwcChwNormalizeGPU<float16>; } // namespace slice_flip_normalize } // namespace kernels } // namespace dali
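// ------------------------------------------------------------------------------------------------
// Illustrative host-side sketch of the block-to-sample lookup used by the kernels above
// (FindSampleIdx): a jump-based binary search over the per-sample `first_blocks` prefix array
// that Run() fills while accumulating `offset_blk`. The helper name FindSampleIdxHost, the test
// values, and the use of __builtin_clz (GCC/Clang) are assumptions made only for this sketch;
// it is not part of the original kernel source.
#include <cassert>
#include <cstdint>
#include <vector>

static uint32_t FindSampleIdxHost(const std::vector<uint32_t> &first_blocks, uint32_t block_idx) {
  // Assumes at least one non-empty sample, mirroring the early return in Run().
  uint32_t num_samples = static_cast<uint32_t>(first_blocks.size());
  uint32_t i = 0;
  // Same jump sequence as the device version: start at the largest power of two <= num_samples.
  for (uint32_t jump = 1u << (31 - __builtin_clz(num_samples)); jump; jump >>= 1) {
    if (i + jump < num_samples && first_blocks[i + jump] <= block_idx) i += jump;
  }
  return i;
}

static void FindSampleIdxHostExample() {
  // Four non-empty samples whose first blocks are 0, 4, 7 and 9:
  // blocks 0-3 -> sample 0, 4-6 -> sample 1, 7-8 -> sample 2, 9+ -> sample 3.
  std::vector<uint32_t> first_blocks = {0, 4, 7, 9};
  assert(FindSampleIdxHost(first_blocks, 0) == 0);
  assert(FindSampleIdxHost(first_blocks, 5) == 1);
  assert(FindSampleIdxHost(first_blocks, 8) == 2);
  assert(FindSampleIdxHost(first_blocks, 9) == 3);
}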
d9a0f59d974205c8404d0312265eb8d50fa69f34.cu
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <tuple> #include <type_traits> #include "dali/core/backend_tags.h" #include "dali/core/convert.h" #include "dali/core/error_handling.h" #include "dali/core/fast_div.h" #include "dali/core/float16.h" #include "dali/core/permute.h" #include "dali/core/static_switch.h" #include "dali/kernels/slice/slice_hwc2chw_normalize_gpu.h" namespace dali { namespace kernels { namespace slice_flip_normalize { template <typename Out, typename In> struct Hwc2HwcChwSampleDesc { Out *__restrict__ out; const In *__restrict__ in; const float *__restrict__ norm_add; const float *__restrict__ norm_mul; const Out *__restrict__ fill_values; int64_t sample_size; uint32_t first_block; // Dimensions of the output int H, W, C; // Dimensions of the input (relevant to input stride) int input_W, input_C; bool flip_x; }; // TODO(klecki): Generalize the utility for binsearch indexing of thread blocks with Cast kernel. inline __device__ uint32_t FindSampleIdx(const uint32_t *first_blocks, uint32_t num_samples) { uint32_t i = 0; for (uint32_t jump = (1 << (32 - __clz(num_samples) - 1)); jump; jump >>= 1) { if (i + jump < num_samples && first_blocks[i + jump] <= blockIdx.x) i += jump; } return i; } /** @defgroup Hwc2HwcChwLoad Data loading for slice Hwc2{Hwc,Chw} Normalize Mirror-x Pad-channel * kernel Load the data from linear chunk of HWC u8 image into a tile in shared memory. The loading * loop consists of three stages: * 1. Prologue - read from the start of the tile to the address that is multiple of 4 byte alignment * 2. Main loop - read most of the tile via uchar4, utilizing 4-byte read instructions. * 3. Epilogue - read the remainder of data that is not covered by the two previous loops. * * The slicing variant is addressed reads only the values required by the output, proceeding * row by row, using the same pattern as above for each row. * Samples are adjusted so that rows slices start at 0, and only the end of row is sliced. * @{ */ /** * @brief Load the linear tile into linear smem buffer. * * @tparam kBlockSize Tile size * @tparam kStaticChannels Number of input channels * @tparam Tile Type of the data kept after loading in the smem tile. * @tparam Out Output data type * @tparam In Input data type * @tparam kLoadAlign - Alignment (in bytes) of the main loop. The access to smem is also aligned * to this value, so depending on the prologue length, the data after loading may not start * at the tile[0]. The start of actual data is returned. * The smem tile must hold at least kBlockSize + kLoadAlign elements. * @param tile Shared memory where to load the data. * @param sample Sample description * @return Tile * - the pointer to the smem where the start of the loaded data is. 
*/ template <int kBlockSize, int kStaticChannels, typename Tile, typename Out, typename In, int kLoadAlign = 32 * 4> __device__ __forceinline__ Tile *load_linear_tile(Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { static_assert(std::is_same_v<In, uint8_t>, "Only uint8_t types allowed now."); static_assert(kStaticChannels == 3, "Only 3 input channels allowed now."); static_assert(kLoadAlign % 4 == 0, "The loading alignment should be divisible by 4."); int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); auto in_start = reinterpret_cast<std::uintptr_t>(sample.in + start_x); auto aligned_in_start = align_up(in_start, kLoadAlign); // In case if end_x - start_x < kLoadAlign, we never get to the aligned main loop uint32_t bytes_to_alignment = ::min(aligned_in_start - in_start, end_x - start_x); Tile *aligned_tile = tile + kLoadAlign; Tile *prologue_tile = aligned_tile - bytes_to_alignment; const In *prologue_in = sample.in + start_x; const uchar4 *aligned_in_uchar4 = reinterpret_cast<const uchar4 *>(sample.in + start_x + bytes_to_alignment); // prologue for (uint32_t idx = threadIdx.x; idx < bytes_to_alignment; idx += blockDim.x) { prologue_tile[idx] = prologue_in[idx]; } // this might be 0, as the prologue may be the full extend of the tile uint32_t left_after_prologue = end_x - start_x - bytes_to_alignment; // We read 4 values in each iteration uint32_t main_loop_length = left_after_prologue >> 2; // main loop: aligned load for (uint32_t idx = threadIdx.x; idx < main_loop_length; idx += blockDim.x) { uchar4 in = aligned_in_uchar4[idx]; aligned_tile[idx * 4 + 0] = in.x; aligned_tile[idx * 4 + 1] = in.y; aligned_tile[idx * 4 + 2] = in.z; aligned_tile[idx * 4 + 3] = in.w; } uint32_t processed_in_main = left_after_prologue & -4; // equivalent to (x / 4) * 4 uint32_t left_after_main = left_after_prologue - processed_in_main; // epilogue Tile *epilogue_tile = aligned_tile + processed_in_main; const In *epilogue_in = reinterpret_cast<const In *>(aligned_in_uchar4 + main_loop_length); for (uint32_t idx = threadIdx.x; idx < left_after_main; idx++) { epilogue_tile[idx] = epilogue_in[idx]; } // Return the start of the tile return prologue_tile; } /** * @brief Load the slices of linear tile into linear smem buffer. * * The kernel proceeds row-by-row, reading the output width elements/pixels, skipping the remaining * input_width - output_width pixels. * * @tparam kBlockSize Tile size * @tparam kStaticChannels Number of input channels * @tparam Tile Type of the data kept after loading in the smem tile. * @tparam Out Output data type * @tparam In Input data type * @tparam kLoadAlign - Alignment (in bytes) of the main loop. * The smem tile must hold at least kBlockSize + kLoadAlign elements. * @param tile Shared memory where to load the data. * @param sample Sample description * @return Tile * - the pointer to the smem where the start of the loaded data is. 
*/ template <int kBlockSize, int kStaticChannels, typename Tile, typename Out, typename In, int kLoadAlign = 4> __device__ __forceinline__ Tile *slice_load_linear_tile( Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { static_assert(std::is_same_v<In, uint8_t>, "Only uint8_t types allowed now."); static_assert(kStaticChannels == 3, "Only 3 input channels allowed now."); static_assert(kLoadAlign % 4 == 0, "The loading alignment should be divisible by 4."); int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); // Strides use the input number of channels without the padding int in_stride = sample.input_W * sample.input_C; // this is intermediate stride, as if we were never padding the data, // so it is useful for filling the linear tile, keeping the xy offset int tile_stride = sample.W * sample.input_C; // The rows we start and end with, we are indexed by output coordinates int y_start = start_x / tile_stride; int y_end = end_x / tile_stride + 1; Tile *tile_row = tile; for (int y = y_start; y < y_end; y++) { int xc_start, xc_end; // The first row doesn't start with 0 due to tiling, the rest do. if (y == y_start) { xc_start = start_x - y_start * tile_stride; } else { xc_start = 0; } // Similarly for the end of row for last row if (y == y_end - 1) { xc_end = end_x - (y_end - 1) * tile_stride; } else { xc_end = tile_stride; } const In *prologue_in = sample.in + y * in_stride + xc_start; auto in_start = reinterpret_cast<std::uintptr_t>(prologue_in); // align to 4 auto aligned_in_start = align_up(in_start, kLoadAlign); uint32_t bytes_to_alignment = ::min(static_cast<int32_t>(aligned_in_start - in_start), xc_end - xc_start); Tile *prologue_tile = tile_row; Tile *aligned_tile = tile_row + bytes_to_alignment; const uchar4 *aligned_in_uchar4 = reinterpret_cast<const uchar4 *>(prologue_in + bytes_to_alignment); // prologue for (uint32_t idx = threadIdx.x; idx < bytes_to_alignment; idx += blockDim.x) { prologue_tile[idx] = prologue_in[idx]; } // this might be 0, as the prologue may be the full extend of the tile uint32_t left_after_prologue = xc_end - xc_start - bytes_to_alignment; // We read 4 values in each iteration uint32_t main_loop_length = left_after_prologue >> 2; // aligned load for (uint32_t idx = threadIdx.x; idx < main_loop_length; idx += blockDim.x) { uchar4 in = aligned_in_uchar4[idx]; aligned_tile[idx * 4 + 0] = in.x; aligned_tile[idx * 4 + 1] = in.y; aligned_tile[idx * 4 + 2] = in.z; aligned_tile[idx * 4 + 3] = in.w; } uint32_t processed_in_main = left_after_prologue & -4; // equivalent to (x / 4) * 4 uint32_t left_after_main = left_after_prologue - processed_in_main; // epilogue Tile *epilogue_tile = aligned_tile + processed_in_main; const In *epilogue_in = reinterpret_cast<const In *>(aligned_in_uchar4 + main_loop_length); for (uint32_t idx = threadIdx.x; idx < left_after_main; idx++) { epilogue_tile[idx] = epilogue_in[idx]; } tile_row += (xc_end - xc_start); } return tile; } /** * @brief Load the slices of linear tile into planar smem buffers. * * During the loading the values are distributed into separate planes in smem (keeping the same * sequential XY coordinates/offsets). Allows for faster access when building padded HWC output. * Each smem plane must hold kBlockSize / kStaticChannels elements. * * @tparam kBlockSize Tile size * @tparam kStaticChannels Number of input channels * @tparam Tile Type of the data kept after loading in the smem tile. 
* @tparam Out Output data type * @tparam In Input data type * @tparam kLoadAlign - Alignment (in bytes) of the main loop. * @param tile Shared memory where to load the data. * @param sample Sample description * @return Tile * - the pointer to the smem where the start of the loaded data is. */ template <int kBlockSize, int kStaticChannels, typename Tile, typename Out, typename In, int kLoadAlign = 4> __device__ __forceinline__ void load_planar_tile(Tile tile[][kBlockSize / kStaticChannels], const Hwc2HwcChwSampleDesc<Out, In> sample) { static_assert(std::is_same_v<In, uint8_t>, "Only uint8_t types allowed now."); static_assert(kStaticChannels == 3, "Only 3 input channels allowed now."); static_assert(kLoadAlign % 4 == 0, "The loading alignment should be divisible by 4."); int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); auto in_start = reinterpret_cast<std::uintptr_t>(sample.in + start_x); auto aligned_in_start = align_up(in_start, kLoadAlign); uint32_t bytes_to_alignment = ::min(aligned_in_start - in_start, end_x - start_x); const In *prologue_in = sample.in + start_x; const uchar4 *aligned_in_char4 = reinterpret_cast<const uchar4 *>(sample.in + start_x + bytes_to_alignment); // The tiles are multiple of 3, so we are always reading from the start of the pixel. fast_div<uint32_t> channel(kStaticChannels); // prologue for (uint32_t idx = threadIdx.x; idx < bytes_to_alignment; idx += blockDim.x) { uint32_t xy, c; xy = div_mod(c, idx, channel); tile[c][xy] = prologue_in[idx]; } // this might be 0, as the prologue may be the full extend of the tile uint32_t left_after_prologue = end_x - start_x - bytes_to_alignment; // We read 4 values in each iteration uint32_t main_loop_length = left_after_prologue >> 2; // main loop: aligned load and unpacking for (uint32_t idx = threadIdx.x; idx < main_loop_length; idx += blockDim.x) { uint32_t flat_idx = idx * 4 + bytes_to_alignment; uint32_t xy, c; xy = div_mod(c, flat_idx, channel); uchar4 in = aligned_in_char4[idx]; tile[c][xy] = in.x; c++; if (c == kStaticChannels) { c = 0; xy++; } tile[c][xy] = in.y; c++; if (c == kStaticChannels) { c = 0; xy++; } tile[c][xy] = in.z; c++; if (c == kStaticChannels) { c = 0; xy++; } tile[c][xy] = in.w; } uint32_t processed_in_main = left_after_prologue & -4; // equivalent to (x / 4) * 4 uint32_t left_after_main = left_after_prologue - processed_in_main; // epilogue const In *epilogue_in = reinterpret_cast<const In *>(aligned_in_char4 + main_loop_length); for (uint32_t idx = threadIdx.x; idx < left_after_main; idx++) { uint32_t flat_idx = processed_in_main + bytes_to_alignment + idx; uint32_t xy, c; xy = div_mod(c, flat_idx, channel); tile[c][xy] = epilogue_in[idx]; } } /** @} */ // end of Hwc2HwcChwLoad /** @defgroup Hwc2HwcChwStore Data storing for slice Hwc2{Hwc,Chw} Normalize Mirror-x Pad-channel * kernel * @{ */ /** * @brief Calculate the planar output offset to take optional mirroring into account. 
*/ template <bool enable_mirror, typename Out, typename In> __device__ __forceinline__ int64_t calculate_offset_chw(int64_t planar_idx, const Hwc2HwcChwSampleDesc<Out, In> sample) { if constexpr (enable_mirror) { if (sample.flip_x) { int y = planar_idx / sample.W; int x = planar_idx - (int64_t)y * sample.W; int target_x = sample.W - 1 - x; return (int64_t)y * sample.W + target_x; } } return planar_idx; } template <int kBlockSize, int kStaticChannels, bool enable_mirror, bool enable_pad, typename Compute = float, typename Tile, typename Out, typename In> __device__ __forceinline__ void store_chw(Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); const auto *__restrict__ fill_values = static_cast<const Out *>(sample.fill_values); // Preload the norm values so they are accessed via registers and not from gmem via pointer. Compute norm_mul[kStaticChannels], norm_add[kStaticChannels]; #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { norm_mul[c] = sample.norm_mul[c]; norm_add[c] = sample.norm_add[c]; } // idx is not divided by the static channels (mostly the start_x) for (int64_t idx = threadIdx.x + start_x / kStaticChannels, base_x = threadIdx.x; idx < end_x / kStaticChannels; idx += blockDim.x, base_x += blockDim.x) { int64_t out_offset = calculate_offset_chw<enable_mirror>(idx, sample); #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { // the kStaticChannels == input_C Compute fpin = tile[base_x * sample.input_C + c]; Compute fpout = fmaf(fpin, norm_mul[c], norm_add[c]); sample.out[c * sample.H * sample.W + out_offset] = ConvertSat<Out>(fpout); } if constexpr (enable_pad) { for (int c = kStaticChannels; c < sample.C; c++) { sample.out[c * sample.H * sample.W + out_offset] = fill_values[c]; } } } } template <int kOutChannels> __device__ __forceinline__ int divide_by_channel(int xc) { if constexpr (kOutChannels == 3) { return xc / kOutChannels; } return xc >> 2; } /** * @brief Calculate the flat output offset for interleaved images to take optional mirroring into * account. */ template <bool enable_mirror, bool enable_pad, typename Out, typename In> __device__ __forceinline__ int64_t calculate_offset_hwc(int64_t flat_idx, int c, const Hwc2HwcChwSampleDesc<Out, In> sample) { constexpr int kOutChannels = enable_pad ? 4 : 3; if constexpr (enable_mirror) { if (sample.flip_x) { int y = flat_idx / (sample.W * kOutChannels); int xc = flat_idx - (int64_t)y * sample.W * kOutChannels; int x = divide_by_channel<kOutChannels>(xc); int target_x = sample.W - 1 - x; return (int64_t)y * sample.W * kOutChannels + target_x * kOutChannels + c; } } return flat_idx; } // TODO(klecki): Prepare a generic version that supports the planar layout in smem and evaluate. template <int kBlockSize, int kStaticChannels, bool enable_mirror, bool enable_pad, typename Compute, typename Tile, typename Out, typename In> __device__ __forceinline__ void store_hwc(Tile *tile, const Hwc2HwcChwSampleDesc<Out, In> sample) { int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); const auto *__restrict__ fill_values = static_cast<const Out *>(sample.fill_values); // Preload the norm values so they are accessed via registers and not from gmem via pointer. 
Compute norm_mul[kStaticChannels], norm_add[kStaticChannels]; #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { norm_mul[c] = sample.norm_mul[c]; norm_add[c] = sample.norm_add[c]; } // Assuming all samples are padded if constexpr (enable_pad) { constexpr int kOutChannels = kStaticChannels + 1; int64_t block_4 = (kBlockSize / kStaticChannels) * kOutChannels; int64_t sample_size_4 = (sample.sample_size / kStaticChannels) * kOutChannels; int64_t start_x_padded = static_cast<int64_t>(blockIdx.x - sample.first_block) * block_4; int64_t end_x_padded = ::min(start_x_padded + block_4, sample_size_4); for (int64_t idx = threadIdx.x + start_x_padded, base_x = threadIdx.x; idx < end_x_padded; idx += blockDim.x, base_x += blockDim.x) { int base_offset = base_x >> 2; int c = idx & 3; int64_t out_offset = calculate_offset_hwc<enable_mirror, enable_pad>(idx, c, sample); if (c < kStaticChannels) { Compute fpin = tile[base_offset * sample.input_C + c]; Compute fpout = fma(fpin, norm_mul[c], norm_add[c]); sample.out[out_offset] = ConvertSat<Out>(fpout); } else { sample.out[out_offset] = fill_values[c]; } } } else { // No padding, we just with the same offset (or mirrored x offset) fast_div<uint32_t> channels(kStaticChannels); for (int64_t idx = threadIdx.x + start_x, base_x = threadIdx.x; idx < end_x; idx += blockDim.x, base_x += blockDim.x) { int c = idx % channels; int64_t out_offset = calculate_offset_hwc<enable_mirror, enable_pad>(idx, c, sample); Compute fpin = tile[base_x]; Compute fpout = fma(fpin, norm_mul[c], norm_add[c]); sample.out[out_offset] = ConvertSat<Out>(fpout); } } } /** * @brief Store a tile of smem that is kept as planes in the HWC format. * * This version is specialized for uint8_t inputs and fp16 outputs + padding from 3 to 4 channels. * The output samples are expected to be aligned to at least 4-bytes allowing for vectorized * stores of __half2. * @tparam Compute Type to conduct computations in. * TODO(klecki): vectorized __half2 can be considered, float is ok. * @tparam Tile smem tile storage type */ template <int kBlockSize, int kStaticChannels, bool enable_mirror, typename Compute, typename Tile> __device__ __forceinline__ void store_planar_hwc_pad( Tile tile[][kBlockSize / kStaticChannels], const Hwc2HwcChwSampleDesc<float16, uint8_t> sample) { constexpr int kOutChannels = kStaticChannels + 1; int64_t start_x = (blockIdx.x - sample.first_block) * kBlockSize; int64_t end_x = ::min(start_x + kBlockSize, sample.sample_size); const auto *__restrict__ fill_values = static_cast<const float16 *>(sample.fill_values); // Preload the norm values so they are accessed via registers and not from gmem via pointer. Compute norm_mul[kOutChannels], norm_add[kOutChannels]; #pragma unroll kStaticChannels for (int c = 0; c < kStaticChannels; c++) { norm_mul[c] = sample.norm_mul[c]; norm_add[c] = sample.norm_add[c]; } // put the fill value so it will be produced as a result of FMA norm_mul[3] = 0; norm_add[3] = sample.fill_values[3]; // Assuming all samples are padded int64_t block_4 = (kBlockSize / kStaticChannels) * kOutChannels; int64_t sample_size_4 = (sample.sample_size / kStaticChannels) * kOutChannels; int64_t start_x_padded = static_cast<int64_t>(blockIdx.x - sample.first_block) * block_4; int64_t end_x_padded = ::min(start_x_padded + block_4, sample_size_4); // TODO(klecki) in the version without mirror, we can keep one offset, as we can start the // output pointer at the output tile. 
auto *out_aligned = sample.out; auto *out_h2 = reinterpret_cast<__half2 *>(sample.out); uint32_t to_write = end_x_padded - start_x_padded; // loop is divided by two as we write two elements in each thread for (uint32_t base_x = threadIdx.x; base_x < to_write / 2; base_x += blockDim.x) { int base_offset = base_x / 2; int c = base_x & 1; int64_t out_offset; if constexpr (enable_mirror) { if (sample.flip_x) { int64_t idx = start_x_padded + base_x * 2; int y = idx / (sample.W * kOutChannels); int xc = idx - (int64_t)y * sample.W * kOutChannels; int x = xc / kOutChannels; int target_x = sample.W - 1 - x; // basically we divide the out_offset by two, The `c` is either 0 or 1. out_offset = (int64_t)y * sample.W * (kOutChannels / 2) + target_x * (kOutChannels / 2) + c; } else { out_offset = start_x_padded / 2 + base_x; } } else { out_offset = start_x_padded / 2 + base_x; } if (c == 0) { Compute fpin0 = tile[0][base_offset]; Compute fpin1 = tile[1][base_offset]; Compute fpout0 = fmaf(fpin0, norm_mul[0], norm_add[0]); Compute fpout1 = fmaf(fpin1, norm_mul[1], norm_add[1]); out_h2[out_offset] = make_half2(ConvertSat<float16>(fpout0), ConvertSat<float16>(fpout1)); } else { Compute fpin0 = tile[2][base_offset]; Compute fpout0 = fmaf(fpin0, norm_mul[2], norm_add[2]); // With more generic implementation, we could do the FMA for this value as well, but we // need to just pad it here. Compute fpout1 = norm_add[3]; out_h2[out_offset] = make_half2(ConvertSat<float16>(fpout0), ConvertSat<float16>(fpout1)); } } } /** @} */ // end of Hwc2HwcChwStore /** @defgroup Hwc2HwcChw The Slice Hwc2{Hwc,Chw} Normalize Mirror-x Pad-channel kernel * * Kernel that reads a HWC u8 image and outputs a HWC or CHW normalized float image, that can be * cropped in Y, X coordinates, mirrored in X coordinate, and the channels can be padded. * * High level structure of the kernel: * 1. Load tile of linear data from the image into shared memory, doing a cast to floating type. * a. Note, that the tile in shared memory can be represented either as an linear chunk with * interleaved channels or as separate channel planes. See the loading functions for details. * b. Each thread in loader loop maps to one value of the loaded image. * c. Tile in shared memory doesn't take the padded channels into account, it stores only the * input channels. * 2. Synchronize * 3. Output the data in correct layout, reading from the shared memory. * a. For CHW output each thread corresponds to a (Y, X) sequential offset into a plane, computes * the values for all the channels and writes them. Assuming 3-channel input, we can look * at the input as a sequential stream of values, where we distribute them (sequentially) * into 3 output planes. * b. Padding the output channels for CHW is done by filling additional planes with fill values. * c. For HWC output, in the simples case we can store the linear tile in the same order * as it was read. In case of padding, fill values must be inserted. * d. Mirroring is done by swapping the X-coordinate and recomputing the target offset for both * layouts. * * The kernel use a thread block size, that is divisible both by channel number: 3 (for the * non-padded output loop), and 4 (alignment for input loop and padded output loop). * * For better throughput, the read and write accesses to global memory are sequential, * using aligned 4-byte-wide access when possible. 
* @{ */ // TODO(klecki): generalize for wider input types /** * @brief Hwc2HwcChw Normalize Mirror-x Pad-channel kernel * This kernel does not support cropping the x coordinate, so the reads are fully linear. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void Hwc2HwcChwNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_chw<kBlockSize, kStaticChannels, enable_mirror, enable_pad>(loaded_tile, sample); } /** * @brief Slice Hwc2HwcChw Normalize [Mirror-x] [Pad-channel] kernel * This kernel supports cropping in x-coordinate. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void SliceHwc2HwcChwNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = slice_load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_chw<kBlockSize, kStaticChannels, enable_mirror, enable_pad>(loaded_tile, sample); } /** * @brief Hwc2Hwc Normalize [Mirror-x] [Pad-channel] kernel * This kernel does not support cropping the x coordinate, so the reads are fully linear. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void Hwc2HwcNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_hwc<kBlockSize, kStaticChannels, enable_mirror, enable_pad, Out>(loaded_tile, sample); } /** * @brief Slice Hwc2Hwc Normalize [Mirror-x] [Pad-channel] kernel * This kernel supports cropping in x-coordinate. */ template <typename Out, typename In, bool enable_mirror, bool enable_pad, int kBlockSize, int kStaticChannels> __global__ void SliceHwc2HwcNormalize(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kBlockSize + 32 * 4]; float *loaded_tile = slice_load_linear_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_hwc<kBlockSize, kStaticChannels, enable_mirror, enable_pad, Out>(loaded_tile, sample); } /** * @brief Hwc2Hwc Normalize [Mirror-x] Pad-channel-always kernel for FP16. * * This kernel utilizes 4-byte reads and writes. The smem intermediate tile uses planar layout, * for better access to the image values during writing of the output. 
* The output samples are assumed to be aligned to the address that is multiple of 4, * thanks to the padding performed to 4 channels, it holds for every batch that is laid out * contiguously in memory with aligned start. This holds for forseeable future in DALI. */ template <typename Out, typename In, bool enable_mirror, int kBlockSize, int kStaticChannels> __global__ void Hwc2HwcNormalizePadFp16(const Hwc2HwcChwSampleDesc<Out, In> *samples, uint32_t *first_blocks, uint32_t num_samples) { static_assert(std::is_same<In, uint8_t>::value, "Only uint8_t supported as input"); constexpr int kOutChannels = kStaticChannels + 1; int sample_idx = FindSampleIdx(first_blocks, num_samples); const auto sample = samples[sample_idx]; __shared__ float tile[kStaticChannels][kBlockSize / kStaticChannels]; load_planar_tile<kBlockSize, kStaticChannels>(tile, sample); __syncthreads(); store_planar_hwc_pad<kBlockSize, kStaticChannels, enable_mirror, float>(tile, sample); } /** @} */ // end of Hwc2HwcChw template <typename Out> KernelRequirements SliceHwc2HwcChwNormalizeGPU<Out>::Setup(KernelContext &ctx, const TensorListShape<ndim> &input_shape, span<const SampleArgs> args, TensorLayout output_layout) { (void)ctx; int num_samples = input_shape.num_samples(); DALI_ENFORCE(num_samples == static_cast<int>(args.size()), "Invalid number of samples in kernel args"); out_shape_ = TensorListShape<ndim>(num_samples, ndim); collapsed_tiling_shape_ = TensorListShape<1>(num_samples, 1); perm_ = output_layout == "HWC" ? std::array<int, 3>{0, 1, 2} : std::array<int, 3>{2, 0, 1}; output_layout_ = output_layout; SetupNumChannels(input_shape, args); DALI_ENFORCE(output_layout == "HWC" || output_layout == "CHW", "Only CHW and HWC output layouts allowed"); for (int i = 0; i < num_samples; i++) { // N.B. 
this function produces a HWC shape, that's why we need the permute auto out_sample_shape = ShapeFromRoi(args[i].roi, out_nchannels_); for (int d = 0; d < spatial_dim; d++) { DALI_ENFORCE(out_sample_shape[d] <= input_shape.tensor_shape_span(i)[d], make_string("Only cropping allowed, got a request for padding in dimension `", d, "` of sample ", i, ".")); } out_sample_shape = permute(out_sample_shape, perm_); out_shape_.set_tensor_shape(i, out_sample_shape); collapsed_tiling_shape_.set_tensor_shape(i, {volume(args[i].roi) * nchannels_}); } KernelRequirements req; req.output_shapes = {out_shape_}; return req; } template <typename Out> std::tuple<float *, float *, Out *> SliceHwc2HwcChwNormalizeGPU<Out>::SetupParams( KernelContext &ctx, span<const SampleArgs> args) { int num_samples = args.size(); float *norm_add_cpu = ctx.scratchpad->AllocatePinned<float>(num_samples * nchannels_); float *norm_mul_cpu = ctx.scratchpad->AllocatePinned<float>(num_samples * nchannels_); Out *fill_values_cpu = ctx.scratchpad->AllocatePinned<Out>(num_samples * out_nchannels_); for (int i = 0; i < num_samples; i++) { const auto &sample_arg = args[i]; auto *norm_add_data = norm_add_cpu + i * nchannels_; auto *norm_mul_data = norm_mul_cpu + i * nchannels_; int mean_sz = sample_arg.mean.size(); assert(mean_sz == sample_arg.inv_stddev.size()); int c = 0; for (; c < mean_sz; c++) { norm_add_data[c] = -sample_arg.mean[c] * sample_arg.inv_stddev[c]; norm_mul_data[c] = sample_arg.inv_stddev[c]; } for (; c < nchannels_; c++) { norm_add_data[c] = 0.0f; norm_mul_data[c] = 1.0f; } auto *fill_values_data = fill_values_cpu + i * out_nchannels_; int fill_values_sz = sample_arg.fill_values.size(); c = 0; for (; c < fill_values_sz; c++) fill_values_data[c] = ConvertSat<Out>(sample_arg.fill_values[c]); for (; c < out_nchannels_; c++) fill_values_data[c] = ConvertSat<Out>(0.0f); } return ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, make_span(norm_add_cpu, num_samples * nchannels_), make_span(norm_mul_cpu, num_samples * nchannels_), make_span(fill_values_cpu, num_samples * out_nchannels_)); } template <typename Out> auto SliceHwc2HwcChwNormalizeGPU<Out>::RealignSample( TensorView<StorageGPU, const In, ndim> in_sample, Roi<spatial_dim> roi) -> std::tuple<TensorView<StorageGPU, const In, ndim>, Roi<spatial_dim>> { const auto *data = in_sample.data; auto shape = in_sample.shape; // skip the cropped rows data += roi.lo.y * shape[1] * shape[2]; shape[0] = roi.extent().y; // skip the cropped columns data += roi.lo.x * shape[2]; return {TensorView<StorageGPU, const In, ndim>{data, shape}, {ivec<spatial_dim>{0}, roi.extent()}}; } template <typename Out> void SliceHwc2HwcChwNormalizeGPU<Out>::SetupNumChannels(const TensorListShape<ndim> &input_shape, span<const SampleArgs> args) { if (input_shape.num_samples() == 0) { return; } const auto first_shape = input_shape.tensor_shape_span(0); nchannels_ = first_shape[channel_dim]; for (int i = 1; i < input_shape.num_samples(); i++) { int ch = input_shape.tensor_shape_span(i)[channel_dim]; DALI_ENFORCE(nchannels_ == ch, make_string("All samples should have the same number of channels, expected ", nchannels_, " channels, got ", ch, " channels in sample ", i)); } DALI_ENFORCE( input_shape.num_samples() == static_cast<int>(args.size()), "Number of samples in the arguments should match the number of samples in the shape."); out_nchannels_ = std::max(nchannels_, static_cast<int>(args[0].fill_values.size())); for (int i = 1; i < input_shape.num_samples(); i++) { DALI_ENFORCE(args[i].fill_values.size() == 
args[0].fill_values.size(), "All sample arguments should have the same number of fill values."); } DALI_ENFORCE(nchannels_ == kStaticChannels, "Only 3 input channels are supported."); if (output_layout_ == "HWC") { // Padding in the operator cannot go higher than the closest power of 2, // but better have the check in place. DALI_ENFORCE(out_nchannels_ == kStaticChannels || out_nchannels_ == kStaticChannels + 1, "Only 3 or 4 output channels are supported for HWC output layout."); } } template <typename Out> void SliceHwc2HwcChwNormalizeGPU<Out>::Run(KernelContext &ctx, const TensorListView<StorageGPU, Out, ndim> &out, const TensorListView<StorageGPU, const In, ndim> &in, span<const SampleArgs> args) { using SampleDesc = Hwc2HwcChwSampleDesc<Out, In>; int num_samples = in.num_samples(); SampleDesc *sample_descs_cpu = ctx.scratchpad->AllocatePinned<SampleDesc>(num_samples); uint32_t *first_blocks_cpu = ctx.scratchpad->AllocatePinned<uint32_t>(num_samples); auto [norm_add_gpu, norm_mul_gpu, fill_values_gpu] = SetupParams(ctx, args); bool need_pad = out_nchannels_ != nchannels_; bool need_crop_x = false; bool need_flip_x = false; // Check if all the outputs are aligned to 4 bytes, used by the specialized FP16 PAD HWC -> HWC // implementation. With the current state of DALI, the start of output allocation is aligned // (to even higher power of two), and all the samples have length that is multiple of 4 (padded to // 4 channels), that is if they are in contiguous allocation, all output samples are still aligned // to a multiple of 4. bool outputs_aligned_4 = true; uint32_t offset_blk = 0; int nonempty_samples = 0; for (int sample_id = 0; sample_id < num_samples; sample_id++) { auto [in_sample, in_roi] = RealignSample(in[sample_id], args[sample_id].roi); // we adjusted the in_roi to start from 0, so roi.extent() == roi.hi if (in_sample.shape[1] != in_roi.hi.x) { need_crop_x = true; } int64_t sample_size = collapsed_tiling_shape_[sample_id][0]; if (sample_size == 0) { continue; } auto &sample_desc = sample_descs_cpu[nonempty_samples]; auto &first_block = first_blocks_cpu[nonempty_samples++]; sample_desc.in = in_sample.data; sample_desc.out = out.tensor_data(sample_id); if (reinterpret_cast<std::uintptr_t>(sample_desc.out) % 4) { outputs_aligned_4 = false; } first_block = offset_blk; sample_desc.first_block = offset_blk; sample_desc.sample_size = sample_size; offset_blk += div_ceil(sample_size, kBlockSizeMul * kBlockWidth); // The output shape here is after the permutation if (output_layout_ == "CHW") { sample_desc.H = out.tensor_shape(sample_id)[1]; sample_desc.W = out.tensor_shape(sample_id)[2]; sample_desc.C = out.tensor_shape(sample_id)[0]; // out_nchannels_ } else { sample_desc.H = out.tensor_shape(sample_id)[0]; sample_desc.W = out.tensor_shape(sample_id)[1]; sample_desc.C = out.tensor_shape(sample_id)[2]; // out_nchannels_ } sample_desc.input_W = in_sample.shape[1]; sample_desc.input_C = in_sample.shape[2]; // nchannels_ sample_desc.norm_add = norm_add_gpu + sample_id * nchannels_; sample_desc.norm_mul = norm_mul_gpu + sample_id * nchannels_; sample_desc.fill_values = fill_values_gpu + sample_id * out_nchannels_; sample_desc.flip_x = args[sample_id].flip_x; if (args[sample_id].flip_x) { need_flip_x = true; } } if (nonempty_samples == 0) return; auto [sample_descs_gpu, first_blocks_gpu] = ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, make_span(sample_descs_cpu, nonempty_samples), make_span(first_blocks_cpu, nonempty_samples)); // TODO(klecki): Maybe this selection can be simplified, but 
making the output layout // a parameter would probably make it even less readable. // This version allows utilizing specialized implementations for every layout more easily. if (output_layout_ == "CHW") { auto dispatch = [samples = sample_descs_gpu, blocks = first_blocks_gpu, &ctx, need_crop_x, offset_blk, nonempty_samples](auto pad_v, auto flip_x_v) { if (need_crop_x) { SliceHwc2HwcChwNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels> <<<offset_blk, kThreadBlockSize, 0, ctx.gpu.stream>>>(samples, blocks, nonempty_samples); } else { Hwc2HwcChwNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels><<<offset_blk, kThreadBlockSize, 0, ctx.gpu.stream>>>( samples, blocks, nonempty_samples); } }; auto dispatch_flip = [&](auto pad_v, bool flip_x) { if (flip_x) { dispatch(pad_v, std::true_type{}); } else { dispatch(pad_v, std::false_type{}); } }; if (need_pad) { dispatch_flip(std::true_type{}, need_flip_x); } else { dispatch_flip(std::false_type{}, need_flip_x); } } else { auto dispatch = [samples = sample_descs_gpu, blocks = first_blocks_gpu, &ctx, need_crop_x, offset_blk, nonempty_samples](auto pad_v, auto flip_x_v, auto out_aligned_v) { if (need_crop_x) { SliceHwc2HwcNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels><<<offset_blk, kThreadBlockSize, 0, ctx.gpu.stream>>>( samples, blocks, nonempty_samples); } else { if constexpr (std::is_same_v<Out, float16> && pad_v.value && out_aligned_v.value) { Hwc2HwcNormalizePadFp16<Out, In, flip_x_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels> <<<offset_blk, kThreadBlockSize, 0, ctx.gpu.stream>>>(samples, blocks, nonempty_samples); } else { Hwc2HwcNormalize<Out, In, flip_x_v.value, pad_v.value, kBlockSizeMul * kBlockWidth, kStaticChannels><<<offset_blk, kThreadBlockSize, 0, ctx.gpu.stream>>>( samples, blocks, nonempty_samples); } } }; auto dispatch_aligned = [&](auto pad_v, auto flip_x_v, bool out_aligned) { if (out_aligned) { dispatch(pad_v, flip_x_v, std::true_type{}); } else { dispatch(pad_v, flip_x_v, std::false_type{}); } }; auto dispatch_flip = [&](auto pad_v, bool flip_x, bool out_aligned) { if (flip_x) { dispatch_aligned(pad_v, std::true_type{}, out_aligned); } else { dispatch_aligned(pad_v, std::false_type{}, out_aligned); } }; if (need_pad) { dispatch_flip(std::true_type{}, need_flip_x, outputs_aligned_4); } else { dispatch_flip(std::false_type{}, need_flip_x, outputs_aligned_4); } } CUDA_CALL(cudaGetLastError()); } template class DLL_PUBLIC SliceHwc2HwcChwNormalizeGPU<float>; template class DLL_PUBLIC SliceHwc2HwcChwNormalizeGPU<float16>; } // namespace slice_flip_normalize } // namespace kernels } // namespace dali
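// ------------------------------------------------------------------------------------------------
// Illustrative host-side sketch of the prologue / vectorized main loop / epilogue split performed
// by load_linear_tile and slice_load_linear_tile above. SplitForAlignedLoad is a hypothetical
// helper written only to show the arithmetic; it is not part of the original kernel source.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static void SplitForAlignedLoad(const uint8_t *in, int64_t n, std::uintptr_t load_align = 32 * 4) {
  auto addr = reinterpret_cast<std::uintptr_t>(in);
  auto aligned = (addr + load_align - 1) / load_align * load_align;  // align_up(addr, load_align)
  // The prologue may cover the whole range when n is smaller than the distance to alignment.
  int64_t prologue = std::min<int64_t>(static_cast<int64_t>(aligned - addr), n);
  int64_t left = n - prologue;             // bytes left after the prologue
  int64_t vec4_reads = left >> 2;          // iterations of the uchar4 main loop
  int64_t epilogue = left - (left & -4);   // 0..3 trailing scalar reads
  std::printf("prologue=%lld, uchar4 reads=%lld, epilogue=%lld\n",
              static_cast<long long>(prologue), static_cast<long long>(vec4_reads),
              static_cast<long long>(epilogue));
}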
68baabf3568358581ba1d49165c5c01a155d5964.hip
// !!! This is a file automatically generated by hipify!!!
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <clover_field.h>
#include <dslash.h>
#include <worker.h>

#include <dslash_policy.cuh>
#include <kernels/dslash_wilson_clover.cuh>

/**
   This is the Wilson-clover linear operator
*/

namespace quda
{

  template <typename Arg> class WilsonClover : public Dslash<wilsonClover, Arg>
  {
    using Dslash = Dslash<wilsonClover, Arg>;
    using Dslash::arg;
    using Dslash::in;

  public:
    WilsonClover(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash(arg, out, in) {}

    void apply(const hipStream_t &stream)
    {
      TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      Dslash::setParam(tp);
      if (arg.xpay)
        Dslash::template instantiate<packShmem, true>(tp, stream);
      else
        errorQuda("Wilson-clover operator only defined for xpay=true");
    }

    long long flops() const
    {
      int clover_flops = 504;
      long long flops = Dslash::flops();
      switch (arg.kernel_type) {
      case INTERIOR_KERNEL:
      case KERNEL_POLICY: flops += clover_flops * in.Volume(); break;
      default: break; // all clover flops are in the interior kernel
      }
      return flops;
    }

    long long bytes() const
    {
      bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
      int clover_bytes = 72 * in.Precision() + (isFixed ? 2 * sizeof(float) : 0);
      long long bytes = Dslash::bytes();
      switch (arg.kernel_type) {
      case INTERIOR_KERNEL:
      case KERNEL_POLICY: bytes += clover_bytes * in.Volume(); break;
      default: break;
      }
      return bytes;
    }
  };

  template <typename Float, int nColor, QudaReconstructType recon> struct WilsonCloverApply {

    inline WilsonCloverApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
                             const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger,
                             const int *comm_override, TimeProfile &profile)
    {
      constexpr int nDim = 4;
      WilsonCloverArg<Float, nColor, nDim, recon> arg(out, in, U, A, a, 0.0, x, parity, dagger, comm_override);
      WilsonClover<decltype(arg)> wilson(arg, out, in);

      dslash::DslashPolicyTune<decltype(wilson)> policy(
        wilson, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
        in.GhostFaceCB(), profile);
      policy.apply(0);

      checkCudaError();
    }
  };

  template <typename Float, int nColor, QudaReconstructType recon> struct WilsonCloverWithTwistApply {

    inline WilsonCloverWithTwistApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
                                      const CloverField &A, double a, double b, const ColorSpinorField &x, int parity,
                                      bool dagger, const int *comm_override, TimeProfile &profile)
    {
      constexpr int nDim = 4;
      WilsonCloverArg<Float, nColor, nDim, recon, true> arg(out, in, U, A, a, b, x, parity, dagger, comm_override);
      WilsonClover<decltype(arg)> wilson(arg, out, in);

      dslash::DslashPolicyTune<decltype(wilson)> policy(
        wilson, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
        in.GhostFaceCB(), profile);
      policy.apply(0);

      checkCudaError();
    }
  };

  // Apply the Wilson-clover operator
  // out(x) = M*in = (A(x) + a * \sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
  // Uses the kappa normalization for the Wilson operator.
  void ApplyWilsonClover(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &A,
                         double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
                         TimeProfile &profile)
  {
#ifdef GPU_CLOVER_DIRAC
    if (in.V() == out.V()) errorQuda("Aliasing pointers");
    if (in.FieldOrder() != out.FieldOrder())
      errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());

    // check all precisions match
    checkPrecision(out, in, U, A);

    // check all locations match
    checkLocation(out, in, U, A);

    instantiate<WilsonCloverApply>(out, in, U, A, a, x, parity, dagger, comm_override, profile);
#else
    errorQuda("Clover dslash has not been built");
#endif
  }

} // namespace quda
68baabf3568358581ba1d49165c5c01a155d5964.cu
#include <gauge_field.h> #include <color_spinor_field.h> #include <clover_field.h> #include <dslash.h> #include <worker.h> #include <dslash_policy.cuh> #include <kernels/dslash_wilson_clover.cuh> /** This is the Wilson-clover linear operator */ namespace quda { template <typename Arg> class WilsonClover : public Dslash<wilsonClover, Arg> { using Dslash = Dslash<wilsonClover, Arg>; using Dslash::arg; using Dslash::in; public: WilsonClover(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash(arg, out, in) {} void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); Dslash::setParam(tp); if (arg.xpay) Dslash::template instantiate<packShmem, true>(tp, stream); else errorQuda("Wilson-clover operator only defined for xpay=true"); } long long flops() const { int clover_flops = 504; long long flops = Dslash::flops(); switch (arg.kernel_type) { case INTERIOR_KERNEL: case KERNEL_POLICY: flops += clover_flops * in.Volume(); break; default: break; // all clover flops are in the interior kernel } return flops; } long long bytes() const { bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false; int clover_bytes = 72 * in.Precision() + (isFixed ? 2 * sizeof(float) : 0); long long bytes = Dslash::bytes(); switch (arg.kernel_type) { case INTERIOR_KERNEL: case KERNEL_POLICY: bytes += clover_bytes * in.Volume(); break; default: break; } return bytes; } }; template <typename Float, int nColor, QudaReconstructType recon> struct WilsonCloverApply { inline WilsonCloverApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { constexpr int nDim = 4; WilsonCloverArg<Float, nColor, nDim, recon> arg(out, in, U, A, a, 0.0, x, parity, dagger, comm_override); WilsonClover<decltype(arg)> wilson(arg, out, in); dslash::DslashPolicyTune<decltype(wilson)> policy(wilson, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(), in.GhostFaceCB(), profile); policy.apply(0); checkCudaError(); } }; template <typename Float, int nColor, QudaReconstructType recon> struct WilsonCloverWithTwistApply { inline WilsonCloverWithTwistApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &A, double a, double b, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { constexpr int nDim = 4; WilsonCloverArg<Float, nColor, nDim, recon, true> arg(out, in, U, A, a, b, x, parity, dagger, comm_override); WilsonClover<decltype(arg)> wilson(arg, out, in); dslash::DslashPolicyTune<decltype(wilson)> policy( wilson, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(), in.GhostFaceCB(), profile); policy.apply(0); checkCudaError(); } }; // Apply the Wilson-clover operator // out(x) = M*in = (A(x) + a * \sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)) // Uses the kappa normalization for the Wilson operator. 
void ApplyWilsonClover(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, const CloverField &A, double a, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile) { #ifdef GPU_CLOVER_DIRAC if (in.V() == out.V()) errorQuda("Aliasing pointers"); if (in.FieldOrder() != out.FieldOrder()) errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder()); // check all precisions match checkPrecision(out, in, U, A); // check all locations match checkLocation(out, in, U, A); instantiate<WilsonCloverApply>(out, in, U, A, a, x, parity, dagger, comm_override, profile); #else errorQuda("Clover dslash has not been built"); #endif } } // namespace quda
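For concreteness, the per-site accounting in the flops() and bytes() methods above can be evaluated for a hypothetical half-precision field (2-byte words, so isFixed is true): clover_bytes = 72 * 2 + 2 * sizeof(float) = 152 bytes and clover_flops = 504 flops are added per site, and only for the INTERIOR_KERNEL and KERNEL_POLICY cases.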
e715bfbc2c485b1e88799859c39d63249ad46d4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "caffe2/core/operator.h" #include "modules/detectron/spatial_narrow_as_op.h" namespace caffe2 { namespace { template <typename T> __global__ void CopyKernel( const int N, const int C, const int in_H, const int in_W, const int out_H, const int out_W, const T* in_data, T* out_data) { CUDA_1D_KERNEL_LOOP(index, N * C * out_H * out_W) { int w = index % out_W; int h = (index / out_W) % out_H; int c = (index / out_W / out_H) % C; int n = (index / out_W / out_H / C); int in_index = n * C * in_H * in_W + c * in_H * in_W + h * in_W + w; int out_index = n * C * out_H * out_W + c * out_H * out_W + h * out_W + w; out_data[out_index] = in_data[in_index]; } } template <typename T> __global__ void CopyGradientKernel( const int N, const int C, const int in_H, const int in_W, const int out_H, const int out_W, const T* in_data, T* out_data) { CUDA_1D_KERNEL_LOOP(index, N * C * in_H * in_W) { int w = index % in_W; int h = (index / in_W) % in_H; int c = (index / in_W / in_H) % C; int n = (index / in_W / in_H / C); int in_index = n * C * in_H * in_W + c * in_H * in_W + h * in_W + w; int out_index = n * C * out_H * out_W + c * out_H * out_W + h * out_W + w; out_data[out_index] = in_data[in_index]; } } } // namespace template <> bool SpatialNarrowAsOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float_t, int32_t>>::call(this, Input(0)); } template <> template <typename T> bool SpatialNarrowAsOp<CUDAContext>::DoRunWithType() { // Narrows input 0 (A) spatially to match input 1 (B) auto& A = Input(0); auto& B = Input(1); CAFFE_ENFORCE_EQ(A.dim32(0), B.dim32(0), "Input dim 0 must be equal."); std::vector<int64_t> sizes; if (A.ndim() == B.ndim()) { CAFFE_ENFORCE_EQ(A.dim32(1), B.dim32(1), "Input dim 1 must be equal."); CAFFE_ENFORCE_GE( A.dim32(2), B.dim32(2), "Input 0 height must be >= input 1 height."); CAFFE_ENFORCE_GE( A.dim32(3), B.dim32(3), "Input 0 width must be >= input 1 width."); sizes = B.sizes().vec(); } else { // For (N, H, W) case CAFFE_ENFORCE_EQ(A.ndim() - 1, B.ndim(), "Dimension mismatch."); CAFFE_ENFORCE_GE( A.dim32(2), B.dim32(1), "Input 0 height must be >= input 1 height."); CAFFE_ENFORCE_GE( A.dim32(3), B.dim32(2), "Input 0 width must be >= input 1 width."); sizes = {A.dim32(0), A.dim32(1), B.dim32(1), B.dim32(2)}; } auto* C = Output(0, sizes, at::dtype<T>()); int out_width = C->dim32(3); int out_height = C->dim32(2); int in_width = A.dim32(3); int in_height = A.dim32(2); hipLaunchKernelGGL(( CopyKernel<T>), dim3(CAFFE_GET_BLOCKS(C->size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), C->dim32(0), C->dim32(1), in_height, in_width, out_height, out_width, A.template data<T>(), C->template mutable_data<T>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } template <> bool SpatialNarrowAsGradientOp<CUDAContext>::RunOnDevice() { return 
DispatchHelper<TensorTypes<float_t, int32_t>>::call(this, Input(0)); } template <> template <typename T> bool SpatialNarrowAsGradientOp<CUDAContext>::DoRunWithType() { auto& A = Input(0); auto& B = Input(1); auto& dC = Input(2); // Gradient of net w.r.t. output of forward op auto* dA = Output(0, A.sizes(), at::dtype<T>()); // Gradient of net w.r.t. input to forward op math::Set<T, CUDAContext>( dA->size(), 0.f, dA->template mutable_data<T>(), &context_); int out_width = dA->dim32(3); int out_height = dA->dim32(2); int in_width = dC.dim32(3); int in_height = dC.dim32(2); hipLaunchKernelGGL(( CopyGradientKernel<T>), dim3(CAFFE_GET_BLOCKS(dC.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dA->dim32(0), dA->dim32(1), in_height, in_width, out_height, out_width, dC.template data<T>(), dA->template mutable_data<T>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(SpatialNarrowAs, SpatialNarrowAsOp<CUDAContext>); REGISTER_CUDA_OPERATOR( SpatialNarrowAsGradient, SpatialNarrowAsGradientOp<CUDAContext>); } // namespace caffe2
e715bfbc2c485b1e88799859c39d63249ad46d4d.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "caffe2/core/operator.h" #include "modules/detectron/spatial_narrow_as_op.h" namespace caffe2 { namespace { template <typename T> __global__ void CopyKernel( const int N, const int C, const int in_H, const int in_W, const int out_H, const int out_W, const T* in_data, T* out_data) { CUDA_1D_KERNEL_LOOP(index, N * C * out_H * out_W) { int w = index % out_W; int h = (index / out_W) % out_H; int c = (index / out_W / out_H) % C; int n = (index / out_W / out_H / C); int in_index = n * C * in_H * in_W + c * in_H * in_W + h * in_W + w; int out_index = n * C * out_H * out_W + c * out_H * out_W + h * out_W + w; out_data[out_index] = in_data[in_index]; } } template <typename T> __global__ void CopyGradientKernel( const int N, const int C, const int in_H, const int in_W, const int out_H, const int out_W, const T* in_data, T* out_data) { CUDA_1D_KERNEL_LOOP(index, N * C * in_H * in_W) { int w = index % in_W; int h = (index / in_W) % in_H; int c = (index / in_W / in_H) % C; int n = (index / in_W / in_H / C); int in_index = n * C * in_H * in_W + c * in_H * in_W + h * in_W + w; int out_index = n * C * out_H * out_W + c * out_H * out_W + h * out_W + w; out_data[out_index] = in_data[in_index]; } } } // namespace template <> bool SpatialNarrowAsOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float_t, int32_t>>::call(this, Input(0)); } template <> template <typename T> bool SpatialNarrowAsOp<CUDAContext>::DoRunWithType() { // Narrows input 0 (A) spatially to match input 1 (B) auto& A = Input(0); auto& B = Input(1); CAFFE_ENFORCE_EQ(A.dim32(0), B.dim32(0), "Input dim 0 must be equal."); std::vector<int64_t> sizes; if (A.ndim() == B.ndim()) { CAFFE_ENFORCE_EQ(A.dim32(1), B.dim32(1), "Input dim 1 must be equal."); CAFFE_ENFORCE_GE( A.dim32(2), B.dim32(2), "Input 0 height must be >= input 1 height."); CAFFE_ENFORCE_GE( A.dim32(3), B.dim32(3), "Input 0 width must be >= input 1 width."); sizes = B.sizes().vec(); } else { // For (N, H, W) case CAFFE_ENFORCE_EQ(A.ndim() - 1, B.ndim(), "Dimension mismatch."); CAFFE_ENFORCE_GE( A.dim32(2), B.dim32(1), "Input 0 height must be >= input 1 height."); CAFFE_ENFORCE_GE( A.dim32(3), B.dim32(2), "Input 0 width must be >= input 1 width."); sizes = {A.dim32(0), A.dim32(1), B.dim32(1), B.dim32(2)}; } auto* C = Output(0, sizes, at::dtype<T>()); int out_width = C->dim32(3); int out_height = C->dim32(2); int in_width = A.dim32(3); int in_height = A.dim32(2); CopyKernel<T><<< CAFFE_GET_BLOCKS(C->size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( C->dim32(0), C->dim32(1), in_height, in_width, out_height, out_width, A.template data<T>(), C->template mutable_data<T>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } template <> bool SpatialNarrowAsGradientOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float_t, int32_t>>::call(this, Input(0)); } template <> template <typename T> bool 
SpatialNarrowAsGradientOp<CUDAContext>::DoRunWithType() { auto& A = Input(0); auto& B = Input(1); auto& dC = Input(2); // Gradient of net w.r.t. output of forward op auto* dA = Output(0, A.sizes(), at::dtype<T>()); // Gradient of net w.r.t. input to forward op math::Set<T, CUDAContext>( dA->size(), 0.f, dA->template mutable_data<T>(), &context_); int out_width = dA->dim32(3); int out_height = dA->dim32(2); int in_width = dC.dim32(3); int in_height = dC.dim32(2); CopyGradientKernel<T><<< CAFFE_GET_BLOCKS(dC.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dA->dim32(0), dA->dim32(1), in_height, in_width, out_height, out_width, dC.template data<T>(), dA->template mutable_data<T>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(SpatialNarrowAs, SpatialNarrowAsOp<CUDAContext>); REGISTER_CUDA_OPERATOR( SpatialNarrowAsGradient, SpatialNarrowAsGradientOp<CUDAContext>); } // namespace caffe2
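The only substantive difference between the .cu listing and its hipified counterpart above is the kernel-launch syntax. A minimal sketch of that mapping with a placeholder kernel (the kernel name and arguments here are illustrative, not taken from either file):

__global__ void scale_kernel(float *x, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per element
  if (i < n) x[i] *= s;
}
// CUDA triple-chevron launch, as used in the .cu file:
//   scale_kernel<<<blocks, threads, 0, stream>>>(d_x, 2.0f, n);
// Equivalent HIP launch emitted by hipify, as used in the .hip file:
//   hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, stream, d_x, 2.0f, n);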
1ebc1dbb0475034f74b94f85f6d0c7b3c70a0552.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#define N 1000

//__host__   => only callable by host
//__device__ => only callable by device
//__global__ => callable by host, executed on the device
__global__ void mat_vec_mul(float *A, float *x, float *c, int n){
    // blockIdx, blockDim, threadIdx are provided by the runtime environment
    int idx = blockIdx.x * blockDim.x + threadIdx.x; // index the thread is working on
    if(idx < n){
        c[idx] = 0.0f;
        for(int i = 0; i < n; i++){
            c[idx] += A[idx * n + i] * x[i]; // row idx of A dotted with x
        }
    }
}

int main( ){
    float *A_h, *x_h, *c_h; // Host memory
    float *A_d, *x_d, *c_d; // device memory

    A_h = (float *) malloc(sizeof(float)*N*N); // matrix memory on host
    x_h = (float *) malloc(sizeof(float)*N);   // x vector memory on host
    c_h = (float *) malloc(sizeof(float)*N);   // c vector memory on host

    hipMalloc((void**) &A_d, sizeof(float)*N*N); // matrix memory on device
    hipMalloc((void**) &x_d, sizeof(float)*N);   // x vector memory on device
    hipMalloc((void**) &c_d, sizeof(float)*N);   // c vector memory on device

    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            A_h[i*N+j] = 1.0;

    for(int j = 0; j < N; j++){
        x_h[j] = 1.0;
    }

    hipMemcpy(A_d, A_h, sizeof(float)*N*N, hipMemcpyHostToDevice);
    hipMemcpy(x_d, x_h, sizeof(float)*N,   hipMemcpyHostToDevice);

    // Do the actual work
    int block_size = 32;
    int n_blocks = N/block_size + (N % block_size == 0 ? 0 : 1);
    hipLaunchKernelGGL(mat_vec_mul, dim3(n_blocks), dim3(block_size), 0, 0, A_d, x_d, c_d, N);

    hipMemcpy(c_h, c_d, sizeof(float)*N, hipMemcpyDeviceToHost);

    for(int i = 0; i < N; i++){
        printf("%d _ %f\n", i, c_h[i]);
    }

    free(A_h);
    free(x_h);
    free(c_h);

    hipFree(A_d);
    hipFree(x_d);
    hipFree(c_d);

    return 0;
}
1ebc1dbb0475034f74b94f85f6d0c7b3c70a0552.cu
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define N 1000

//__host__   => only callable by host
//__device__ => only callable by device
//__global__ => callable by host, executed on the device
__global__ void mat_vec_mul(float *A, float *x, float *c, int n){
    // blockIdx, blockDim, threadIdx are provided by the cuda environment
    int idx = blockIdx.x * blockDim.x + threadIdx.x; // index the thread is working on
    if(idx < n){
        c[idx] = 0.0f;
        for(int i = 0; i < n; i++){
            c[idx] += A[idx * n + i] * x[i]; // row idx of A dotted with x
        }
    }
}

int main( ){
    float *A_h, *x_h, *c_h; // Host memory
    float *A_d, *x_d, *c_d; // device memory

    A_h = (float *) malloc(sizeof(float)*N*N); // matrix memory on host
    x_h = (float *) malloc(sizeof(float)*N);   // x vector memory on host
    c_h = (float *) malloc(sizeof(float)*N);   // c vector memory on host

    cudaMalloc((void**) &A_d, sizeof(float)*N*N); // matrix memory on device
    cudaMalloc((void**) &x_d, sizeof(float)*N);   // x vector memory on device
    cudaMalloc((void**) &c_d, sizeof(float)*N);   // c vector memory on device

    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            A_h[i*N+j] = 1.0;

    for(int j = 0; j < N; j++){
        x_h[j] = 1.0;
    }

    cudaMemcpy(A_d, A_h, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(x_d, x_h, sizeof(float)*N,   cudaMemcpyHostToDevice);

    // Do the actual work
    int block_size = 32;
    int n_blocks = N/block_size + (N % block_size == 0 ? 0 : 1);
    mat_vec_mul <<<n_blocks, block_size>>> (A_d, x_d, c_d, N);

    cudaMemcpy(c_h, c_d, sizeof(float)*N, cudaMemcpyDeviceToHost);

    for(int i = 0; i < N; i++){
        printf("%d _ %f\n", i, c_h[i]);
    }

    free(A_h);
    free(x_h);
    free(c_h);

    cudaFree(A_d);
    cudaFree(x_d);
    cudaFree(c_d);

    return 0;
}
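A small, hypothetical sanity check that could follow the copy back to c_h in the program above: with A and x filled with 1.0, every entry of c should equal N (1000).

for (int i = 0; i < N; i++) {
    if (c_h[i] != (float)N) {   // exact for a sum of 1000 ones in float
        fprintf(stderr, "mismatch at %d: %f\n", i, c_h[i]);
        break;
    }
}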
86c6cb261508d22924b4826b4d5c142f3003eca6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __global__ void MaskedIm2colForward(const int n, const scalar_t *data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_col) { // mask_cnt * channels CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_col = mask_h_idx[m_index]; const int w_col = mask_w_idx[m_index]; const int c_im = index / mask_cnt; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col - pad_h; const int w_offset = w_col - pad_w; scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index; for (int i = 0; i < kernel_h; ++i) { int h_im = h_offset + i; for (int j = 0; j < kernel_w; ++j) { int w_im = w_offset + j; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { *data_col_ptr = (scalar_t)data_im[(c_im * height + h_im) * width + w_im]; } else { *data_col_ptr = 0.0; } data_col_ptr += mask_cnt; } } } } int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( bottom_data.type(), "MaskedIm2colLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); hipLaunchKernelGGL(( MaskedIm2colForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_data_, height, width, kernel_h, kernel_w, pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); THCudaCheck(hipGetLastError()); return 1; } template <typename scalar_t> __global__ void MaskedCol2imForward(const int n, const scalar_t *data_col, const int height, const int width, const int channels, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_im = mask_h_idx[m_index]; const int w_im = mask_w_idx[m_index]; const int c_im = index / mask_cnt; // int kernel_extent_w = (kernel_w - 1) + 1; // int kernel_extent_h = (kernel_h - 1) + 1; // compute the start and end of the output data_im[(c_im * height + h_im) * width + w_im] = data_col[index]; } } int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( bottom_data.type(), "MaskedCol2imLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ 
= mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); hipLaunchKernelGGL(( MaskedCol2imForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_data_, height, width, channels, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); THCudaCheck(hipGetLastError()); return 1; }
86c6cb261508d22924b4826b4d5c142f3003eca6.cu
#include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __global__ void MaskedIm2colForward(const int n, const scalar_t *data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_col) { // mask_cnt * channels CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_col = mask_h_idx[m_index]; const int w_col = mask_w_idx[m_index]; const int c_im = index / mask_cnt; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col - pad_h; const int w_offset = w_col - pad_w; scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index; for (int i = 0; i < kernel_h; ++i) { int h_im = h_offset + i; for (int j = 0; j < kernel_w; ++j) { int w_im = w_offset + j; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { *data_col_ptr = (scalar_t)data_im[(c_im * height + h_im) * width + w_im]; } else { *data_col_ptr = 0.0; } data_col_ptr += mask_cnt; } } } } int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( bottom_data.type(), "MaskedIm2colLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); MaskedIm2colForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data_, height, width, kernel_h, kernel_w, pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); THCudaCheck(cudaGetLastError()); return 1; } template <typename scalar_t> __global__ void MaskedCol2imForward(const int n, const scalar_t *data_col, const int height, const int width, const int channels, const long *mask_h_idx, const long *mask_w_idx, const int mask_cnt, scalar_t *data_im) { CUDA_1D_KERNEL_LOOP(index, n) { const int m_index = index % mask_cnt; const int h_im = mask_h_idx[m_index]; const int w_im = mask_w_idx[m_index]; const int c_im = index / mask_cnt; // int kernel_extent_w = (kernel_w - 1) + 1; // int kernel_extent_h = (kernel_h - 1) + 1; // compute the start and end of the output data_im[(c_im * height + h_im) * width + w_im] = data_col[index]; } } int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height, const int width, const int channels, const at::Tensor mask_h_idx, const at::Tensor mask_w_idx, const int mask_cnt, at::Tensor top_data) { const int output_size = mask_cnt * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( bottom_data.type(), "MaskedCol2imLaucherForward", ([&] { const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); const long *mask_h_idx_ = mask_h_idx.data<long>(); const long *mask_w_idx_ = mask_w_idx.data<long>(); scalar_t *top_data_ = top_data.data<scalar_t>(); MaskedCol2imForward<scalar_t> 
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data_, height, width, channels, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); })); THCudaCheck(cudaGetLastError()); return 1; }
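As a rough guide to the layout MaskedIm2colForward produces, here is a hedged CPU reference sketch (single image; all names are illustrative, not taken from the file): the output has channels * kernel_h * kernel_w rows and mask_cnt columns, and out-of-bounds taps are zero-filled.

void masked_im2col_cpu(const float *im, int C, int H, int W,
                       int kh, int kw, int pad_h, int pad_w,
                       const long *mask_h, const long *mask_w, int mask_cnt,
                       float *col) {
  for (int c = 0; c < C; ++c)
    for (int m = 0; m < mask_cnt; ++m)
      for (int i = 0; i < kh; ++i)
        for (int j = 0; j < kw; ++j) {
          int h = (int)mask_h[m] - pad_h + i;   // same offset rule as the kernel
          int w = (int)mask_w[m] - pad_w + j;
          int row = c * kh * kw + i * kw + j;
          col[row * mask_cnt + m] =
              (h >= 0 && w >= 0 && h < H && w < W) ? im[(c * H + h) * W + w] : 0.f;
        }
}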
42e0dacc56aa9cc780432415ea3f72f7bf8a43c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Author Oleksandr Borysov * Task2 */ #include <stdio.h> #include <stdlib.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <math.h> #include <time.h> #include <mpi.h> #define MAX 32767 #define PLOT_DATA_FILE "plot_data2.txt" __global__ void getCounts(unsigned long* result, unsigned long* steps, unsigned long* seed) { double x, y, z; unsigned int count = 0; hiprandState_t state; hiprand_init(*seed, 0, 0, &state); for (unsigned long i = 0; i < *steps; ++i) { x = ((double)((hiprand(&state)) % MAX)) / MAX; y = ((double)((hiprand(&state)) % MAX)) / MAX; z = sqrt((x * x) + (y * y)); if (z <= 1) { ++count; } } *result = count; } int main(int argc, char* argv[]) { unsigned int stepNumber; int myid, numprocs, dev_used; unsigned long procStep, result, seed, resault, count; hipError_t cudaStatus; double minTime, maxTime, avrTime, elapse_time = 0.0; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myid); MPI_Comm_size(MPI_COMM_WORLD, &numprocs); if (myid == 0) { printf("Type number of steps \n"); scanf("%u", &stepNumber); procStep = stepNumber / numprocs; } MPI_Bcast(&procStep, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); elapse_time = -MPI_Wtime(); hipSetDevice(myid); hipGetDevice(&dev_used); // Find which GPU is being used printf("myid = %d: device used = %d\n", myid, dev_used); unsigned long *d_procStep, *d_result, *d_seed; //---------------- cudaStatus = hipMalloc(&d_result, sizeof(long)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc d_result failed!"); goto Error; } cudaStatus = hipMalloc(&d_procStep, sizeof(long)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc d_procStep failed!"); goto Error; } cudaStatus = hipMalloc(&d_seed, sizeof(long)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc d_seed failed!"); goto Error; } // Genereterandom seed seed = time(NULL); cudaStatus = hipMemcpy(d_seed, &seed, sizeof(long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy d_seed failed!"); goto Error; } cudaStatus = hipMemcpy(d_procStep, &procStep, sizeof(long), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy d_procStep failed!"); goto Error; } // run CUDA method hipLaunchKernelGGL(( getCounts) , dim3(1), dim3(1), 0, 0, d_result, d_procStep, d_seed); cudaStatus = hipMemcpy(&result, d_result, sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy d_result failed!"); goto Error; } //-------------- MPI_Reduce(&result, &count, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD); elapse_time += MPI_Wtime(); MPI_Reduce(&elapse_time, &minTime, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce(&elapse_time, &maxTime, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); MPI_Reduce(&elapse_time, &avrTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); Error: hipFree(d_result); hipFree(d_procStep); hipFree(d_seed); if (myid == 0 && cudaStatus == 0) { int steps = procStep * numprocs; printf("Calculated PI is = %f.\n Time= %f\n", ((double)count / steps) * 4.0, elapse_time); avrTime /= numprocs; FILE* dataPlotFile; dataPlotFile = fopen(PLOT_DATA_FILE, "a"); fprintf(dataPlotFile, "%d %f %f %f %d\n", numprocs, maxTime, avrTime, minTime, steps); fclose(dataPlotFile); } MPI_Finalize(); }
42e0dacc56aa9cc780432415ea3f72f7bf8a43c5.cu
/* * Author Oleksandr Borysov * Task2 */ #include <stdio.h> #include <stdlib.h> #include <curand.h> #include <curand_kernel.h> #include <math.h> #include <time.h> #include <mpi.h> #define MAX 32767 #define PLOT_DATA_FILE "plot_data2.txt" __global__ void getCounts(unsigned long* result, unsigned long* steps, unsigned long* seed) { double x, y, z; unsigned int count = 0; curandState_t state; curand_init(*seed, 0, 0, &state); for (unsigned long i = 0; i < *steps; ++i) { x = ((double)((curand(&state)) % MAX)) / MAX; y = ((double)((curand(&state)) % MAX)) / MAX; z = sqrt((x * x) + (y * y)); if (z <= 1) { ++count; } } *result = count; } int main(int argc, char* argv[]) { unsigned int stepNumber; int myid, numprocs, dev_used; unsigned long procStep, result, seed, resault, count; cudaError_t cudaStatus; double minTime, maxTime, avrTime, elapse_time = 0.0; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myid); MPI_Comm_size(MPI_COMM_WORLD, &numprocs); if (myid == 0) { printf("Type number of steps \n"); scanf("%u", &stepNumber); procStep = stepNumber / numprocs; } MPI_Bcast(&procStep, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); elapse_time = -MPI_Wtime(); cudaSetDevice(myid); cudaGetDevice(&dev_used); // Find which GPU is being used printf("myid = %d: device used = %d\n", myid, dev_used); unsigned long *d_procStep, *d_result, *d_seed; //---------------- cudaStatus = cudaMalloc(&d_result, sizeof(long)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc d_result failed!"); goto Error; } cudaStatus = cudaMalloc(&d_procStep, sizeof(long)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc d_procStep failed!"); goto Error; } cudaStatus = cudaMalloc(&d_seed, sizeof(long)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc d_seed failed!"); goto Error; } // Genereterandom seed seed = time(NULL); cudaStatus = cudaMemcpy(d_seed, &seed, sizeof(long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy d_seed failed!"); goto Error; } cudaStatus = cudaMemcpy(d_procStep, &procStep, sizeof(long), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy d_procStep failed!"); goto Error; } // run CUDA method getCounts <<<1, 1>>>(d_result, d_procStep, d_seed); cudaStatus = cudaMemcpy(&result, d_result, sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy d_result failed!"); goto Error; } //-------------- MPI_Reduce(&result, &count, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD); elapse_time += MPI_Wtime(); MPI_Reduce(&elapse_time, &minTime, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); MPI_Reduce(&elapse_time, &maxTime, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); MPI_Reduce(&elapse_time, &avrTime, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); Error: cudaFree(d_result); cudaFree(d_procStep); cudaFree(d_seed); if (myid == 0 && cudaStatus == 0) { int steps = procStep * numprocs; printf("Calculated PI is = %f.\n Time= %f\n", ((double)count / steps) * 4.0, elapse_time); avrTime /= numprocs; FILE* dataPlotFile; dataPlotFile = fopen(PLOT_DATA_FILE, "a"); fprintf(dataPlotFile, "%d %f %f %f %d\n", numprocs, maxTime, avrTime, minTime, steps); fclose(dataPlotFile); } MPI_Finalize(); }
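The kernel above draws all of a rank's samples from a single GPU thread. A hedged sketch of a more parallel variant of the same estimator, with one thread per batch of samples and an atomic add to combine counts (all names are illustrative, not taken from the file):

#include <curand_kernel.h>

__global__ void count_hits(unsigned long long *total, unsigned long samples_per_thread,
                           unsigned long seed) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  curandState state;
  curand_init(seed, tid, 0, &state);              // one subsequence per thread
  unsigned long long hits = 0;
  for (unsigned long i = 0; i < samples_per_thread; ++i) {
    double x = curand_uniform_double(&state);
    double y = curand_uniform_double(&state);
    if (x * x + y * y <= 1.0) ++hits;
  }
  atomicAdd(total, hits);                         // combine per-thread counts
}
// pi is then estimated as 4.0 * (*total) / (samples_per_thread * total_threads).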
4e917a1dc8a9681742d25f2cda0bf869548ea154.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <cstdio> #include <chrono> typedef std::chrono::high_resolution_clock Clock; #define ITER 65535 // CPU version of the vector add function void vector_add_cpu(int *a, int *b, int *c, int n) { int i; // Add the vector elements a and b to the vector c for (i = 0; i < n; ++i) { c[i] = a[i] + b[i]; } } // GPU version of the vector add function via "__global__" prefix. // These kind of functions are called kernels in CUDA. When called, they are executed in parallel by N different threads // as opposed to only once in a regular c++ function __global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c) { int i = threadIdx.x; // No for loop needed because the CUDA runtime // will thread this ITER times gpu_c[i] = gpu_a[i] + gpu_b[i]; } int main() { int *a, *b, *c; int *gpu_a, *gpu_b, *gpu_c; a = (int *)malloc(ITER * sizeof(int)); b = (int *)malloc(ITER * sizeof(int)); c = (int *)malloc(ITER * sizeof(int)); // We need variables accessible to the GPU, // so hipMallocManaged provides these hipMallocManaged(&gpu_a, ITER * sizeof(int)); hipMallocManaged(&gpu_b, ITER * sizeof(int)); hipMallocManaged(&gpu_c, ITER * sizeof(int)); for (int i = 0; i < ITER; ++i) { a[i] = i; b[i] = i; c[i] = i; } // Call the CPU function and time it auto cpu_start = Clock::now(); vector_add_cpu(a, b, c, ITER); auto cpu_end = Clock::now(); std::cout << "vector_add_cpu: " << std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count() << " nanoseconds.\n"; // Call the GPU function and time it // The triple angle brakets is a CUDA runtime extension that allows // parameters of a CUDA kernel call to be passed. // In this example, we are passing one thread block with ITER GPU threads. auto gpu_start = Clock::now(); hipLaunchKernelGGL(( vector_add_gpu) , dim3(1), dim3(ITER), 0, 0, gpu_a, gpu_b, gpu_c); hipDeviceSynchronize(); auto gpu_end = Clock::now(); std::cout << "vector_add_gpu: " << std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count() << " nanoseconds.\n"; // Free the GPU-function based memory allocations hipFree(gpu_a); hipFree(gpu_b); hipFree(gpu_c); // Free the CPU-function based memory allocations free(a); free(b); free(c); return 0; }
4e917a1dc8a9681742d25f2cda0bf869548ea154.cu
#include "cuda_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <cstdio> #include <chrono> typedef std::chrono::high_resolution_clock Clock; #define ITER 65535 // CPU version of the vector add function void vector_add_cpu(int *a, int *b, int *c, int n) { int i; // Add the vector elements a and b to the vector c for (i = 0; i < n; ++i) { c[i] = a[i] + b[i]; } } // GPU version of the vector add function via "__global__" prefix. // These kind of functions are called kernels in CUDA. When called, they are executed in parallel by N different threads // as opposed to only once in a regular c++ function __global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c) { int i = threadIdx.x; // No for loop needed because the CUDA runtime // will thread this ITER times gpu_c[i] = gpu_a[i] + gpu_b[i]; } int main() { int *a, *b, *c; int *gpu_a, *gpu_b, *gpu_c; a = (int *)malloc(ITER * sizeof(int)); b = (int *)malloc(ITER * sizeof(int)); c = (int *)malloc(ITER * sizeof(int)); // We need variables accessible to the GPU, // so cudaMallocManaged provides these cudaMallocManaged(&gpu_a, ITER * sizeof(int)); cudaMallocManaged(&gpu_b, ITER * sizeof(int)); cudaMallocManaged(&gpu_c, ITER * sizeof(int)); for (int i = 0; i < ITER; ++i) { a[i] = i; b[i] = i; c[i] = i; } // Call the CPU function and time it auto cpu_start = Clock::now(); vector_add_cpu(a, b, c, ITER); auto cpu_end = Clock::now(); std::cout << "vector_add_cpu: " << std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count() << " nanoseconds.\n"; // Call the GPU function and time it // The triple angle brakets is a CUDA runtime extension that allows // parameters of a CUDA kernel call to be passed. // In this example, we are passing one thread block with ITER GPU threads. auto gpu_start = Clock::now(); vector_add_gpu <<<1, ITER>>> (gpu_a, gpu_b, gpu_c); cudaDeviceSynchronize(); auto gpu_end = Clock::now(); std::cout << "vector_add_gpu: " << std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count() << " nanoseconds.\n"; // Free the GPU-function based memory allocations cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c); // Free the CPU-function based memory allocations free(a); free(b); free(c); return 0; }
c12341f401346646b6bad5312334995e72530093.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel2 (dtype *input, dtype *output, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if(i < n) { scratch[threadIdx.x] = input[i]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = blockDim.x >> 1; s >= 1; s >>= 1) { if ( threadIdx.x < s ) scratch[threadIdx.x] += scratch[threadIdx.x + s]; __syncthreads (); } if(threadIdx.x == 0) { output[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_2, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 2; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype), hipMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); hipDeviceSynchronize (); stopwatch_start (timer); /* execute kernel */ hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s); s = (s + threads - 1) / 
threads; } hipDeviceSynchronize (); t_kernel_2 = stopwatch_stop (timer); fprintf (stdout, "Time to execute sequential index GPU reduction kernel: %Lg secs\n", t_kernel_2); double bw = (N * sizeof(dtype)) / (t_kernel_2 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype), hipMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
c12341f401346646b6bad5312334995e72530093.cu
#include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel2 (dtype *input, dtype *output, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if(i < n) { scratch[threadIdx.x] = input[i]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = blockDim.x >> 1; s >= 1; s >>= 1) { if ( threadIdx.x < s ) scratch[threadIdx.x] += scratch[threadIdx.x + s]; __syncthreads (); } if(threadIdx.x == 0) { output[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_2, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 2; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype), cudaMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ kernel2 <<<gb, tb>>> (d_idata, d_odata, N); cudaThreadSynchronize (); stopwatch_start (timer); /* execute kernel */ kernel2 <<<gb, tb>>> (d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); kernel2 <<<gb, tb>>> (d_odata, d_odata, s); s = (s + threads - 1) / threads; } cudaThreadSynchronize (); t_kernel_2 = stopwatch_stop (timer); fprintf (stdout, "Time to execute sequential index GPU reduction kernel: %Lg secs\n", t_kernel_2); double bw = (N * 
sizeof(dtype)) / (t_kernel_2 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype), cudaMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
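With the defaults above (N = 8 * 1024 * 1024, MAX_THREADS = 256, whichKernel = 2), the timed region issues three launches: the first pass uses 32768 blocks of 256 threads to reduce 8388608 values to 32768 partial sums, and the while loop then reduces 32768 to 128 (128 blocks of 256 threads) and 128 to 1 (one block of 128 threads).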
999871803e5b34221f6896ee06689f255aaf0547.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/bincount_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { using paddle::platform::PADDLE_CUDA_NUM_THREADS; inline int GET_BLOCKS(const int N) { return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS; } template <typename T, typename InputT, typename OutT> __global__ void KernelBincount(const InputT* input, const int total_elements, const bool has_weights, const T* weights, OutT* output) { if (!has_weights) { for (int i = threadIdx.x; i < total_elements; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&output[input[i]], 1L); } } else { for (int i = threadIdx.x; i < total_elements; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&output[input[i]], static_cast<OutT>(weights[i])); } } } template <typename Context, typename T, typename InputT> void BincountCUDAInner(const Context& dev_ctx, const DenseTensor& x, const paddle::optional<const DenseTensor&> weights, int minlength, DenseTensor* out) { const DenseTensor* input = &x; DenseTensor* output = out; const InputT* input_data = input->data<InputT>(); const int input_numel = input->numel(); if (input_data == nullptr) { phi::DDim out_dim{0}; output->Resize(out_dim); dev_ctx.template Alloc<T>(output); return; } auto input_x = EigenVector<InputT>::Flatten(*input); DenseTensor input_min_t, input_max_t; input_max_t.Resize({1}); auto* input_max_data = dev_ctx.template Alloc<InputT>(&input_max_t); input_min_t.Resize({1}); auto* input_min_data = dev_ctx.template Alloc<InputT>(&input_min_t); auto input_max_scala = EigenScalar<InputT>::From(input_max_t); auto input_min_scala = EigenScalar<InputT>::From(input_min_t); auto* place = dev_ctx.eigen_device(); input_max_scala.device(*place) = input_x.maximum(); input_min_scala.device(*place) = input_x.minimum(); DenseTensor input_min_cpu, input_max_cpu; paddle::framework::TensorCopySync( input_max_t, phi::CPUPlace(), &input_max_cpu); paddle::framework::TensorCopySync( input_min_t, phi::CPUPlace(), &input_min_cpu); InputT input_min = input_min_cpu.data<InputT>()[0]; PADDLE_ENFORCE_GE( input_min, static_cast<InputT>(0), phi::errors::InvalidArgument( "The elements in input tensor must be non-negative ints")); int64_t output_size = static_cast<int64_t>(input_max_cpu.data<InputT>()[0]) + 1L; output_size = ::max(output_size, static_cast<int64_t>(minlength)); phi::DDim out_dim{output_size}; output->Resize(out_dim); bool has_weights = weights.is_initialized(); const T* weights_data = has_weights ? 
weights->data<T>() : nullptr; auto stream = dev_ctx.stream(); if (!has_weights) { int64_t* output_data = dev_ctx.template Alloc<int64_t>(output); phi::funcs::SetConstant<Context, int64_t>()(dev_ctx, output, 0L); hipLaunchKernelGGL(( KernelBincount<T, InputT, int64_t>), dim3(GET_BLOCKS(input_numel)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, input_data, input_numel, has_weights, weights_data, output_data); } else { const auto& weights_type = paddle::framework::TransToProtoVarType(weights->dtype()); if (weights->dtype() == DataType::FLOAT32) { float* output_data = dev_ctx.template Alloc<float>(output); phi::funcs::SetConstant<Context, float>()( dev_ctx, output, static_cast<float>(0)); hipLaunchKernelGGL(( KernelBincount<T, InputT, float>), dim3(GET_BLOCKS(input_numel)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, input_data, input_numel, has_weights, weights_data, output_data); } else { double* output_data = dev_ctx.template Alloc<double>(output); phi::funcs::SetConstant<Context, double>()( dev_ctx, output, static_cast<double>(0)); hipLaunchKernelGGL(( KernelBincount<T, InputT, double>), dim3(GET_BLOCKS(input_numel)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, input_data, input_numel, has_weights, weights_data, output_data); } } } template <typename T, typename Context> void BincountKernel(const Context& dev_ctx, const DenseTensor& x, const paddle::optional<const DenseTensor&> weights, int minlength, DenseTensor* out) { if (x.dtype() == DataType::INT32) { BincountCUDAInner<Context, T, int>(dev_ctx, x, weights, minlength, out); } else if (x.dtype() == DataType::INT64) { BincountCUDAInner<Context, T, int64_t>(dev_ctx, x, weights, minlength, out); } } } // namespace phi PD_REGISTER_KERNEL(bincount, GPU, ALL_LAYOUT, phi::BincountKernel, float, double, int, int64_t) {}
999871803e5b34221f6896ee06689f255aaf0547.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/bincount_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { using paddle::platform::PADDLE_CUDA_NUM_THREADS; inline int GET_BLOCKS(const int N) { return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS; } template <typename T, typename InputT, typename OutT> __global__ void KernelBincount(const InputT* input, const int total_elements, const bool has_weights, const T* weights, OutT* output) { if (!has_weights) { for (int i = threadIdx.x; i < total_elements; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&output[input[i]], 1L); } } else { for (int i = threadIdx.x; i < total_elements; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&output[input[i]], static_cast<OutT>(weights[i])); } } } template <typename Context, typename T, typename InputT> void BincountCUDAInner(const Context& dev_ctx, const DenseTensor& x, const paddle::optional<const DenseTensor&> weights, int minlength, DenseTensor* out) { const DenseTensor* input = &x; DenseTensor* output = out; const InputT* input_data = input->data<InputT>(); const int input_numel = input->numel(); if (input_data == nullptr) { phi::DDim out_dim{0}; output->Resize(out_dim); dev_ctx.template Alloc<T>(output); return; } auto input_x = EigenVector<InputT>::Flatten(*input); DenseTensor input_min_t, input_max_t; input_max_t.Resize({1}); auto* input_max_data = dev_ctx.template Alloc<InputT>(&input_max_t); input_min_t.Resize({1}); auto* input_min_data = dev_ctx.template Alloc<InputT>(&input_min_t); auto input_max_scala = EigenScalar<InputT>::From(input_max_t); auto input_min_scala = EigenScalar<InputT>::From(input_min_t); auto* place = dev_ctx.eigen_device(); input_max_scala.device(*place) = input_x.maximum(); input_min_scala.device(*place) = input_x.minimum(); DenseTensor input_min_cpu, input_max_cpu; paddle::framework::TensorCopySync( input_max_t, phi::CPUPlace(), &input_max_cpu); paddle::framework::TensorCopySync( input_min_t, phi::CPUPlace(), &input_min_cpu); InputT input_min = input_min_cpu.data<InputT>()[0]; PADDLE_ENFORCE_GE( input_min, static_cast<InputT>(0), phi::errors::InvalidArgument( "The elements in input tensor must be non-negative ints")); int64_t output_size = static_cast<int64_t>(input_max_cpu.data<InputT>()[0]) + 1L; output_size = std::max(output_size, static_cast<int64_t>(minlength)); phi::DDim out_dim{output_size}; output->Resize(out_dim); bool has_weights = weights.is_initialized(); const T* weights_data = has_weights ? 
weights->data<T>() : nullptr; auto stream = dev_ctx.stream(); if (!has_weights) { int64_t* output_data = dev_ctx.template Alloc<int64_t>(output); phi::funcs::SetConstant<Context, int64_t>()(dev_ctx, output, 0L); KernelBincount<T, InputT, int64_t><<<GET_BLOCKS(input_numel), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( input_data, input_numel, has_weights, weights_data, output_data); } else { const auto& weights_type = paddle::framework::TransToProtoVarType(weights->dtype()); if (weights->dtype() == DataType::FLOAT32) { float* output_data = dev_ctx.template Alloc<float>(output); phi::funcs::SetConstant<Context, float>()( dev_ctx, output, static_cast<float>(0)); KernelBincount<T, InputT, float><<<GET_BLOCKS(input_numel), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( input_data, input_numel, has_weights, weights_data, output_data); } else { double* output_data = dev_ctx.template Alloc<double>(output); phi::funcs::SetConstant<Context, double>()( dev_ctx, output, static_cast<double>(0)); KernelBincount<T, InputT, double><<<GET_BLOCKS(input_numel), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( input_data, input_numel, has_weights, weights_data, output_data); } } } template <typename T, typename Context> void BincountKernel(const Context& dev_ctx, const DenseTensor& x, const paddle::optional<const DenseTensor&> weights, int minlength, DenseTensor* out) { if (x.dtype() == DataType::INT32) { BincountCUDAInner<Context, T, int>(dev_ctx, x, weights, minlength, out); } else if (x.dtype() == DataType::INT64) { BincountCUDAInner<Context, T, int64_t>(dev_ctx, x, weights, minlength, out); } } } // namespace phi PD_REGISTER_KERNEL(bincount, GPU, ALL_LAYOUT, phi::BincountKernel, float, double, int, int64_t) {}
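As a concrete example of the bincount semantics implemented above: for x = [1, 2, 2, 5] and minlength = 0, output_size = 5 + 1 = 6 and the integer path produces [0, 1, 2, 0, 0, 1]; with float weights w = [0.5, 1.0, 1.0, 2.0] the float path instead accumulates [0, 0.5, 2.0, 0, 0, 2.0].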
357849ee3ca0c6ee7e80f0d86aa74ac421b7e45f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file bounding_box.cu * \brief Bounding box util functions and operators * \author Joshua Zhang */ #include <hipcub/hipcub.hpp> #include "./bounding_box-inl.cuh" #include "./bounding_box-inl.h" #include "../elemwise_op_common.h" namespace mxnet { namespace op { namespace { using mshadow::Tensor; using mshadow::Stream; template <typename DType> struct TempWorkspace { index_t scores_temp_space; DType* scores; index_t scratch_space; uint8_t* scratch; index_t buffer_space; DType* buffer; index_t nms_scratch_space; uint32_t* nms_scratch; index_t indices_temp_spaces; index_t* indices; }; inline index_t ceil_div(index_t x, index_t y) { return (x + y - 1) / y; } inline index_t align(index_t x, index_t alignment) { return ceil_div(x, alignment) * alignment; } template <typename DType> __global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores, index_t num_elements_per_batch, const index_t element_width, const index_t N, const float threshold, const int id_index, const int score_index, const int background_id) { index_t tid = blockIdx.x * blockDim.x + threadIdx.x; bool first_in_element = (tid % element_width == 0); index_t start_of_my_element = tid - (tid % element_width); if (tid < N) { DType my_score = data[start_of_my_element + score_index]; bool filtered_out = my_score <= threshold; if (id_index != -1 && background_id != -1) { DType my_id = data[start_of_my_element + id_index]; filtered_out = filtered_out || (my_id == background_id); } if (!filtered_out) { out[tid] = data[tid]; } else { out[tid] = -1; my_score = -1; } if (first_in_element) { index_t offset = tid / element_width; scores[offset] = my_score; } } } template <typename DType> void FilterAndPrepareAuxData(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, const TempWorkspace<DType>& workspace, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; index_t N = data.shape_.Size(); const auto blocks = ceil_div(N, n_threads); hipLaunchKernelGGL(( FilterAndPrepareAuxDataKernel), dim3(blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data.dptr_, out->dptr_, workspace.scores, data.shape_[1], data.shape_[2], N, param.valid_thresh, param.id_index, param.score_index, param.background_id); } template <bool check_topk, bool check_score, typename DType> __global__ void CompactDataKernel(const index_t* indices, const DType* source, DType* destination, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int score_index, const index_t N) { const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x; for (index_t tid = tid_start; tid < N; tid += 
blockDim.x * gridDim.x) { const index_t my_element = tid / element_width; const index_t my_element_in_batch = my_element % num_elements_per_batch; if (check_topk && my_element_in_batch >= topk) { destination[tid] = -1; } else { DType ret; const index_t source_element = indices[my_element]; DType score = 0; if (check_score) { score = source[source_element * element_width + score_index]; } if (score >= 0) { ret = source[source_element * element_width + tid % element_width]; } else { ret = -1; } destination[tid] = ret; } } } template <bool check_score, typename DType> void CompactData(const Tensor<gpu, 1, index_t>& indices, const Tensor<gpu, 3, DType>& source, Tensor<gpu, 3, DType>* destination, const index_t topk, const int score_index, Stream<gpu>* s) { const int n_threads = 512; const index_t max_blocks = 320; index_t N = source.shape_.Size(); const auto blocks = ::min(ceil_div(N, n_threads), max_blocks); if (topk > 0) { hipLaunchKernelGGL(( CompactDataKernel<true, check_score>), dim3(blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } else { hipLaunchKernelGGL(( CompactDataKernel<false, check_score>), dim3(blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } } template <typename DType> void WorkspaceForSort(const index_t num_elem, const index_t topk, const int alignment, TempWorkspace<DType>* workspace) { const index_t sort_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(num_elem, false, false); const index_t sort_topk_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(topk, false, false); workspace->scratch_space = align(::max(sort_scores_temp_space, sort_topk_scores_temp_space), alignment); } template <int encode, typename DType> __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold); template <typename DType> __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType * data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elems, const index_t start_index, const index_t topk); template <typename DType> __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch); template <typename DType> struct NMS { static constexpr int THRESHOLD = 512; void operator()(Tensor<gpu, 3, DType>* data, Tensor<gpu, 2, uint32_t>* scratch, const index_t topk, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; const index_t num_batches = data->shape_[0]; const index_t num_elements_per_batch = data->shape_[1]; const index_t element_width = data->shape_[2]; for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) { const index_t n_elems = topk - current_start; const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads); const index_t 
num_blocks_per_row = num_blocks_per_row_batch * num_batches; const index_t n_blocks = THRESHOLD / (sizeof(uint32_t) * 8) * num_blocks_per_row; if (param.in_format == box_common_enum::kCorner) { hipLaunchKernelGGL(( CalculateGreedyNMSResultsKernel<box_common_enum::kCorner>) , dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } else { hipLaunchKernelGGL(( CalculateGreedyNMSResultsKernel<box_common_enum::kCenter>) , dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } hipLaunchKernelGGL(( ReduceNMSResultTriangleKernel), dim3(num_batches), dim3(THRESHOLD), 0, Stream<gpu>::GetStream(s), scratch->dptr_, data->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk); const index_t n_rest_elems = n_elems - THRESHOLD; const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads); const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches; if (n_rest_elems > 0) { hipLaunchKernelGGL(( ReduceNMSResultRestKernel), dim3(num_rest_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data->dptr_, scratch->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk, num_rest_blocks_per_batch); } } } }; template <int encode, typename DType> __device__ __forceinline__ DType calculate_area(const DType b0, const DType b1, const DType b2, const DType b3) { DType width = b2; DType height = b3; if (encode == box_common_enum::kCorner) { width -= b0; height -= b1; } if (width < 0 || height < 0) return 0; return width * height; } template <int encode, typename DType> __device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1, const DType a2, const DType a3, const DType b0, const DType b1, const DType b2, const DType b3) { DType wx, wy; if (encode == box_common_enum::kCorner) { const DType left = a0 > b0 ? a0 : b0; const DType bottom = a1 > b1 ? a1 : b1; const DType right = a2 < b2 ? a2 : b2; const DType top = a3 < b3 ? a3 : b3; wx = right - left; wy = top - bottom; } else { const DType al = 2 * a0 - a2; const DType ar = 2 * a0 + a2; const DType bl = 2 * b0 - b2; const DType br = 2 * b0 + b2; const DType left = bl > al ? bl : al; const DType right = br < ar ? br : ar; wx = right - left; const DType ab = 2 * a1 - a3; const DType at = 2 * a1 + a3; const DType bb = 2 * b1 - b3; const DType bt = 2 * b1 + b3; const DType bottom = bb > ab ? bb : ab; const DType top = bt < at ? 
bt : at; wy = top - bottom; wy = wy / 4; // To compensate for both wx and wy being 2x too large } if (wx <= 0 || wy <= 0) { return 0; } else { return (wx * wy); } } template <int encode, typename DType> __launch_bounds__(512) __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold) { constexpr int max_elem_width = 20; constexpr int num_other_boxes = sizeof(uint32_t) * 8; __shared__ DType other_boxes[max_elem_width * num_other_boxes]; __shared__ DType other_boxes_areas[num_other_boxes]; const index_t my_row = blockIdx.x / num_blocks_per_row; const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row; const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch; const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + current_start + threadIdx.x; // Load other boxes const index_t offset = (my_batch * num_elements_per_batch + current_start + my_row * num_other_boxes) * element_width; for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) { other_boxes[i] = data[offset + i]; } __syncthreads(); if (threadIdx.x < num_other_boxes) { const int other_boxes_offset = element_width * threadIdx.x; const DType their_area = calculate_area<encode>( other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); other_boxes_areas[threadIdx.x] = their_area; } __syncthreads(); if (my_element_in_batch >= topk) return; DType my_box[4]; DType my_class = -1; DType my_score = -1; const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) * element_width; my_score = data[my_offset + score_index]; #pragma unroll for (int i = 0; i < 4; ++i) { my_box[i] = data[my_offset + coord_index + i]; } if (class_index != -1) { my_class = data[my_offset + class_index]; } DType my_area = calculate_area<encode>(my_box[0], my_box[1], my_box[2], my_box[3]); uint32_t ret = 0; if (my_score != -1) { #pragma unroll for (int i = 0; i < num_other_boxes; ++i) { const int other_boxes_offset = element_width * i; if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) && other_boxes[other_boxes_offset + score_index] != -1) { const DType their_area = other_boxes_areas[i]; const DType intersect = calculate_intersection<encode>( my_box[0], my_box[1], my_box[2], my_box[3], other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); if (intersect > threshold * (my_area + their_area - intersect)) { ret = ret | (1u << i); } } } } result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret; } template <typename DType> __launch_bounds__(NMS<DType>::THRESHOLD) __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType * data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t 
start_index, const index_t topk) { constexpr int n_threads = NMS<DType>::THRESHOLD; constexpr int warp_size = 32; const index_t my_batch = blockIdx.x; const index_t my_element_in_batch = threadIdx.x + start_index; const index_t my_element = my_batch * topk + my_element_in_batch; const int my_warp = threadIdx.x / warp_size; const int my_lane = threadIdx.x % warp_size; __shared__ uint32_t current_valid_boxes[n_threads / warp_size]; const uint32_t full_mask = 0xFFFFFFFF; const uint32_t my_lane_mask = 1 << my_lane; const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1; uint32_t valid = my_lane_mask; uint32_t valid_boxes = full_mask; uint32_t my_next_mask = my_element_in_batch < topk ? nms_results[my_element]: full_mask; #pragma unroll for (int i = 0; i < n_threads / warp_size; ++i) { uint32_t my_mask = my_next_mask; my_next_mask = (((i + 1) < n_threads / warp_size) && (my_element_in_batch < topk)) ? nms_results[(i + 1) * topk * num_batches + my_element]: full_mask; if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) { my_mask = my_mask | earlier_threads_mask; // Loop over warp_size - 1 because the last // thread does not contribute to the mask anyway #pragma unroll for (int j = 0; j < warp_size - 1; ++j) { const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j); valid = valid & mask; } valid_boxes = __ballot_sync(full_mask, valid); } if (my_lane == 0 && my_warp == i) { current_valid_boxes[i] = valid_boxes; } __syncthreads(); if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) { valid = 0; } } if (my_lane == 0) { nms_results[my_element] = valid_boxes; } if (valid == 0) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> __launch_bounds__(512) __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch) { constexpr int num_other_boxes = sizeof(uint32_t) * 8; constexpr int num_iterations = NMS<DType>::THRESHOLD / num_other_boxes; constexpr int warp_size = 32; const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch; const index_t my_batch = blockIdx.x / num_blocks_per_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + start_index + NMS<DType>::THRESHOLD + threadIdx.x; const index_t my_element = my_batch * topk + my_element_in_batch; if (my_element_in_batch >= topk) return; bool valid = true; #pragma unroll for (int i = 0; i < num_iterations; ++i) { const uint32_t my_mask = nms_results[i * topk * num_batches + my_element]; const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index]; const bool no_hit = (valid_boxes & (~my_mask)) == 0; valid = valid && no_hit; } if (!valid) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> TempWorkspace<DType> GetWorkspace(const index_t num_batch, const index_t num_elem, const int width_elem, const index_t topk, const OpContext& ctx) { TempWorkspace<DType> workspace; Stream<gpu> *s = ctx.get_stream<gpu>(); const int alignment = 128; // Get the workspace size workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment); workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment); 
WorkspaceForSort(num_elem, topk, alignment, &workspace); // Place for a buffer workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment); workspace.nms_scratch_space = align(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8) * num_batch * topk * sizeof(uint32_t), alignment); const index_t workspace_size = workspace.scores_temp_space + workspace.scratch_space + workspace.nms_scratch_space + workspace.indices_temp_spaces; // Obtain the memory for workspace Tensor<gpu, 1, uint8_t> scratch_memory = ctx.requested[box_nms_enum::kTempSpace] .get_space_typed<gpu, 1, uint8_t>(mshadow::Shape1(workspace_size), s); // Populate workspace pointers workspace.scores = reinterpret_cast<DType*>(scratch_memory.dptr_); workspace.scratch = reinterpret_cast<uint8_t*>(workspace.scores) + workspace.scores_temp_space; workspace.buffer = reinterpret_cast<DType*>(workspace.scratch + workspace.scratch_space); workspace.nms_scratch = reinterpret_cast<uint32_t*>( reinterpret_cast<uint8_t*>(workspace.buffer) + workspace.buffer_space); workspace.indices = reinterpret_cast<index_t*>( reinterpret_cast<uint8_t*>(workspace.nms_scratch) + workspace.nms_scratch_space); return workspace; } template <typename DType> __global__ void ExtractScoresKernel(const DType* data, DType* scores, const index_t N, const int element_width, const int score_index) { const index_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) { scores[tid] = data[tid * element_width + score_index]; } } template <typename DType> void CompactNMSResults(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, Tensor<gpu, 1, index_t>* indices, Tensor<gpu, 1, DType>* scores, Tensor<gpu, 1, index_t>* sorted_indices, Tensor<gpu, 1, DType>* sorted_scores, Tensor<gpu, 1, char>* scratch, const int score_index, const index_t topk, Stream<gpu>* s) { using mshadow::Shape1; constexpr int n_threads = 512; const index_t num_elements = scores->shape_.Size(); const index_t num_elements_per_batch = data.shape_[1]; const index_t num_batches = data.shape_[0]; const int element_width = data.shape_[2]; const index_t n_blocks = ceil_div(num_elements, n_threads); hipLaunchKernelGGL(( ExtractScoresKernel), dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data.dptr_, scores->dptr_, num_elements, element_width, score_index); *indices = mshadow::expr::range<index_t>(0, num_elements); for (index_t i = 0; i < num_batches; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> indices_batch(indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<true>(*sorted_indices, data, out, topk, score_index, s); } } // namespace void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using mshadow::Shape1; using mshadow::Shape2; using mshadow::Shape3; CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo"; CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place computation"; CHECK_EQ(inputs.size(), 1U); 
CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed); Stream<gpu> *s = ctx.get_stream<gpu>(); mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_; int indim = in_shape.ndim(); int num_batch = indim <= 2? 1 : in_shape.ProdShape(0, indim - 2); int num_elem = in_shape[indim - 2]; int width_elem = in_shape[indim - 1]; MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<gpu, 3, DType> data = inputs[box_nms_enum::kData] .get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 3, DType> out = outputs[box_nms_enum::kOut] .get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s); // Special case for topk == 0 if (param.topk == 0) { if (req[0] != kNullOp && req[0] != kWriteInplace) { out = mshadow::expr::F<mshadow_op::identity>(data); } return; } index_t topk = param.topk > 0 ? ::min(param.topk, num_elem) : num_elem; const auto& workspace = GetWorkspace<DType>(num_batch, num_elem, width_elem, topk, ctx); FilterAndPrepareAuxData(data, &out, workspace, param, s); Tensor<gpu, 1, DType> scores(workspace.scores, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, DType> sorted_scores(workspace.scores + scores.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> indices(workspace.indices, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> sorted_indices(workspace.indices + indices.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, char> scratch(reinterpret_cast<char*>(workspace.scratch), Shape1(workspace.scratch_space), s); Tensor<gpu, 3, DType> buffer(workspace.buffer, Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 2, uint32_t> nms_scratch(workspace.nms_scratch, Shape2(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8), topk * num_batch), s); indices = mshadow::expr::range<index_t>(0, num_batch * num_elem); for (index_t i = 0; i < num_batch; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> indices_batch(indices.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices.dptr_ + i * num_elem, Shape1(num_elem), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<false>(sorted_indices, out, &buffer, topk, -1, s); NMS<DType> nms; nms(&buffer, &nms_scratch, topk, param, s); CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices, &sorted_scores, &scratch, param.score_index, topk, s); // convert encoding if (param.in_format != param.out_format) { if (box_common_enum::kCenter == param.out_format) { mxnet::op::mxnet_op::Kernel<corner_to_center, gpu>::Launch(s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } else { mxnet::op::mxnet_op::Kernel<center_to_corner, gpu>::Launch(s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } } }); } void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; if (req[1] == kNullOp) { BoxNMSForwardGPU_notemp(attrs, ctx, inputs, req, 
outputs); return; } BoxNMSForward<gpu>(attrs, ctx, inputs, req, outputs); } NNVM_REGISTER_OP(_contrib_box_nms) .set_attr<FCompute>("FCompute<gpu>", BoxNMSForwardGPU); NNVM_REGISTER_OP(_backward_contrib_box_nms) .set_attr<FCompute>("FCompute<gpu>", BoxNMSBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_iou) .set_attr<FCompute>("FCompute<gpu>", BoxOverlapForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_box_iou) .set_attr<FCompute>("FCompute<gpu>", BoxOverlapBackward<gpu>); NNVM_REGISTER_OP(_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_encode) .set_attr<FCompute>("FCompute<gpu>", BoxEncodeForward<gpu>); NNVM_REGISTER_OP(_contrib_box_decode) .set_attr<FCompute>("FCompute<gpu>", BoxDecodeForward<gpu>); } // namespace op } // namespace mxnet
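The NMS kernels in the file above parallelise an inherently sequential rule: CalculateGreedyNMSResultsKernel builds 32-bit overlap masks against tiles of "other" boxes, and ReduceNMSResultTriangleKernel / ReduceNMSResultRestKernel then resolve those masks in score order. For orientation only, here is a minimal single-threaded sketch of the same greedy rule; the RefBox struct, the corner layout, and the helper names are illustrative assumptions, not part of the MXNet sources.

// Hypothetical CPU reference: boxes are assumed pre-sorted by descending
// score; a box survives only if its IoU with every earlier surviving box
// (of the same class, unless force_suppress) stays at or below the threshold.
#include <algorithm>
#include <vector>

struct RefBox { float x1, y1, x2, y2; int cls; };  // corner format (assumption)

static float ref_iou(const RefBox& a, const RefBox& b) {
  const float iw = std::min(a.x2, b.x2) - std::max(a.x1, b.x1);
  const float ih = std::min(a.y2, b.y2) - std::max(a.y1, b.y1);
  if (iw <= 0.f || ih <= 0.f) return 0.f;
  const float inter = iw * ih;
  const float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
  const float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);
  return inter / (area_a + area_b - inter);
}

std::vector<int> greedy_nms_reference(const std::vector<RefBox>& sorted_boxes,
                                      float overlap_thresh, bool force_suppress) {
  std::vector<int> keep;
  for (int i = 0; i < static_cast<int>(sorted_boxes.size()); ++i) {
    bool suppressed = false;
    for (int j : keep) {
      const bool comparable = force_suppress ||
                              sorted_boxes[i].cls == sorted_boxes[j].cls;
      if (comparable && ref_iou(sorted_boxes[i], sorted_boxes[j]) > overlap_thresh) {
        suppressed = true;
        break;
      }
    }
    if (!suppressed) keep.push_back(i);
  }
  return keep;
}

The mask/tile layout in the GPU version lets the expensive pairwise overlap tests run fully in parallel, leaving only the cheap suppression bookkeeping (the warp ballots in the triangle kernel, over THRESHOLD-sized tiles of 512 boxes) to follow score order.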
357849ee3ca0c6ee7e80f0d86aa74ac421b7e45f.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file bounding_box.cu * \brief Bounding box util functions and operators * \author Joshua Zhang */ #include <cub/cub.cuh> #include "./bounding_box-inl.cuh" #include "./bounding_box-inl.h" #include "../elemwise_op_common.h" namespace mxnet { namespace op { namespace { using mshadow::Tensor; using mshadow::Stream; template <typename DType> struct TempWorkspace { index_t scores_temp_space; DType* scores; index_t scratch_space; uint8_t* scratch; index_t buffer_space; DType* buffer; index_t nms_scratch_space; uint32_t* nms_scratch; index_t indices_temp_spaces; index_t* indices; }; inline index_t ceil_div(index_t x, index_t y) { return (x + y - 1) / y; } inline index_t align(index_t x, index_t alignment) { return ceil_div(x, alignment) * alignment; } template <typename DType> __global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores, index_t num_elements_per_batch, const index_t element_width, const index_t N, const float threshold, const int id_index, const int score_index, const int background_id) { index_t tid = blockIdx.x * blockDim.x + threadIdx.x; bool first_in_element = (tid % element_width == 0); index_t start_of_my_element = tid - (tid % element_width); if (tid < N) { DType my_score = data[start_of_my_element + score_index]; bool filtered_out = my_score <= threshold; if (id_index != -1 && background_id != -1) { DType my_id = data[start_of_my_element + id_index]; filtered_out = filtered_out || (my_id == background_id); } if (!filtered_out) { out[tid] = data[tid]; } else { out[tid] = -1; my_score = -1; } if (first_in_element) { index_t offset = tid / element_width; scores[offset] = my_score; } } } template <typename DType> void FilterAndPrepareAuxData(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, const TempWorkspace<DType>& workspace, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; index_t N = data.shape_.Size(); const auto blocks = ceil_div(N, n_threads); FilterAndPrepareAuxDataKernel<<<blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data.dptr_, out->dptr_, workspace.scores, data.shape_[1], data.shape_[2], N, param.valid_thresh, param.id_index, param.score_index, param.background_id); } template <bool check_topk, bool check_score, typename DType> __global__ void CompactDataKernel(const index_t* indices, const DType* source, DType* destination, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int score_index, const index_t N) { const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x; for (index_t tid = tid_start; tid < N; tid += blockDim.x * gridDim.x) { const index_t my_element = tid / element_width; const index_t my_element_in_batch = my_element % 
num_elements_per_batch; if (check_topk && my_element_in_batch >= topk) { destination[tid] = -1; } else { DType ret; const index_t source_element = indices[my_element]; DType score = 0; if (check_score) { score = source[source_element * element_width + score_index]; } if (score >= 0) { ret = source[source_element * element_width + tid % element_width]; } else { ret = -1; } destination[tid] = ret; } } } template <bool check_score, typename DType> void CompactData(const Tensor<gpu, 1, index_t>& indices, const Tensor<gpu, 3, DType>& source, Tensor<gpu, 3, DType>* destination, const index_t topk, const int score_index, Stream<gpu>* s) { const int n_threads = 512; const index_t max_blocks = 320; index_t N = source.shape_.Size(); const auto blocks = std::min(ceil_div(N, n_threads), max_blocks); if (topk > 0) { CompactDataKernel<true, check_score><<<blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } else { CompactDataKernel<false, check_score><<<blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } } template <typename DType> void WorkspaceForSort(const index_t num_elem, const index_t topk, const int alignment, TempWorkspace<DType>* workspace) { const index_t sort_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(num_elem, false, false); const index_t sort_topk_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(topk, false, false); workspace->scratch_space = align(std::max(sort_scores_temp_space, sort_topk_scores_temp_space), alignment); } template <int encode, typename DType> __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold); template <typename DType> __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType * data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elems, const index_t start_index, const index_t topk); template <typename DType> __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch); template <typename DType> struct NMS { static constexpr int THRESHOLD = 512; void operator()(Tensor<gpu, 3, DType>* data, Tensor<gpu, 2, uint32_t>* scratch, const index_t topk, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; const index_t num_batches = data->shape_[0]; const index_t num_elements_per_batch = data->shape_[1]; const index_t element_width = data->shape_[2]; for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) { const index_t n_elems = topk - current_start; const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads); const index_t num_blocks_per_row = num_blocks_per_row_batch * num_batches; const index_t n_blocks = THRESHOLD / (sizeof(uint32_t) * 8) * num_blocks_per_row; if (param.in_format == 
box_common_enum::kCorner) { CalculateGreedyNMSResultsKernel<box_common_enum::kCorner> <<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } else { CalculateGreedyNMSResultsKernel<box_common_enum::kCenter> <<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } ReduceNMSResultTriangleKernel<<<num_batches, THRESHOLD, 0, Stream<gpu>::GetStream(s)>>>( scratch->dptr_, data->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk); const index_t n_rest_elems = n_elems - THRESHOLD; const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads); const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches; if (n_rest_elems > 0) { ReduceNMSResultRestKernel<<<num_rest_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data->dptr_, scratch->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk, num_rest_blocks_per_batch); } } } }; template <int encode, typename DType> __device__ __forceinline__ DType calculate_area(const DType b0, const DType b1, const DType b2, const DType b3) { DType width = b2; DType height = b3; if (encode == box_common_enum::kCorner) { width -= b0; height -= b1; } if (width < 0 || height < 0) return 0; return width * height; } template <int encode, typename DType> __device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1, const DType a2, const DType a3, const DType b0, const DType b1, const DType b2, const DType b3) { DType wx, wy; if (encode == box_common_enum::kCorner) { const DType left = a0 > b0 ? a0 : b0; const DType bottom = a1 > b1 ? a1 : b1; const DType right = a2 < b2 ? a2 : b2; const DType top = a3 < b3 ? a3 : b3; wx = right - left; wy = top - bottom; } else { const DType al = 2 * a0 - a2; const DType ar = 2 * a0 + a2; const DType bl = 2 * b0 - b2; const DType br = 2 * b0 + b2; const DType left = bl > al ? bl : al; const DType right = br < ar ? br : ar; wx = right - left; const DType ab = 2 * a1 - a3; const DType at = 2 * a1 + a3; const DType bb = 2 * b1 - b3; const DType bt = 2 * b1 + b3; const DType bottom = bb > ab ? bb : ab; const DType top = bt < at ? 
bt : at; wy = top - bottom; wy = wy / 4; // To compensate for both wx and wy being 2x too large } if (wx <= 0 || wy <= 0) { return 0; } else { return (wx * wy); } } template <int encode, typename DType> __launch_bounds__(512) __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold) { constexpr int max_elem_width = 20; constexpr int num_other_boxes = sizeof(uint32_t) * 8; __shared__ DType other_boxes[max_elem_width * num_other_boxes]; __shared__ DType other_boxes_areas[num_other_boxes]; const index_t my_row = blockIdx.x / num_blocks_per_row; const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row; const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch; const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + current_start + threadIdx.x; // Load other boxes const index_t offset = (my_batch * num_elements_per_batch + current_start + my_row * num_other_boxes) * element_width; for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) { other_boxes[i] = data[offset + i]; } __syncthreads(); if (threadIdx.x < num_other_boxes) { const int other_boxes_offset = element_width * threadIdx.x; const DType their_area = calculate_area<encode>( other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); other_boxes_areas[threadIdx.x] = their_area; } __syncthreads(); if (my_element_in_batch >= topk) return; DType my_box[4]; DType my_class = -1; DType my_score = -1; const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) * element_width; my_score = data[my_offset + score_index]; #pragma unroll for (int i = 0; i < 4; ++i) { my_box[i] = data[my_offset + coord_index + i]; } if (class_index != -1) { my_class = data[my_offset + class_index]; } DType my_area = calculate_area<encode>(my_box[0], my_box[1], my_box[2], my_box[3]); uint32_t ret = 0; if (my_score != -1) { #pragma unroll for (int i = 0; i < num_other_boxes; ++i) { const int other_boxes_offset = element_width * i; if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) && other_boxes[other_boxes_offset + score_index] != -1) { const DType their_area = other_boxes_areas[i]; const DType intersect = calculate_intersection<encode>( my_box[0], my_box[1], my_box[2], my_box[3], other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); if (intersect > threshold * (my_area + their_area - intersect)) { ret = ret | (1u << i); } } } } result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret; } template <typename DType> __launch_bounds__(NMS<DType>::THRESHOLD) __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType * data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t 
start_index, const index_t topk) { constexpr int n_threads = NMS<DType>::THRESHOLD; constexpr int warp_size = 32; const index_t my_batch = blockIdx.x; const index_t my_element_in_batch = threadIdx.x + start_index; const index_t my_element = my_batch * topk + my_element_in_batch; const int my_warp = threadIdx.x / warp_size; const int my_lane = threadIdx.x % warp_size; __shared__ uint32_t current_valid_boxes[n_threads / warp_size]; const uint32_t full_mask = 0xFFFFFFFF; const uint32_t my_lane_mask = 1 << my_lane; const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1; uint32_t valid = my_lane_mask; uint32_t valid_boxes = full_mask; uint32_t my_next_mask = my_element_in_batch < topk ? nms_results[my_element]: full_mask; #pragma unroll for (int i = 0; i < n_threads / warp_size; ++i) { uint32_t my_mask = my_next_mask; my_next_mask = (((i + 1) < n_threads / warp_size) && (my_element_in_batch < topk)) ? nms_results[(i + 1) * topk * num_batches + my_element]: full_mask; if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) { my_mask = my_mask | earlier_threads_mask; // Loop over warp_size - 1 because the last // thread does not contribute to the mask anyway #pragma unroll for (int j = 0; j < warp_size - 1; ++j) { const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j); valid = valid & mask; } valid_boxes = __ballot_sync(full_mask, valid); } if (my_lane == 0 && my_warp == i) { current_valid_boxes[i] = valid_boxes; } __syncthreads(); if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) { valid = 0; } } if (my_lane == 0) { nms_results[my_element] = valid_boxes; } if (valid == 0) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> __launch_bounds__(512) __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch) { constexpr int num_other_boxes = sizeof(uint32_t) * 8; constexpr int num_iterations = NMS<DType>::THRESHOLD / num_other_boxes; constexpr int warp_size = 32; const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch; const index_t my_batch = blockIdx.x / num_blocks_per_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + start_index + NMS<DType>::THRESHOLD + threadIdx.x; const index_t my_element = my_batch * topk + my_element_in_batch; if (my_element_in_batch >= topk) return; bool valid = true; #pragma unroll for (int i = 0; i < num_iterations; ++i) { const uint32_t my_mask = nms_results[i * topk * num_batches + my_element]; const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index]; const bool no_hit = (valid_boxes & (~my_mask)) == 0; valid = valid && no_hit; } if (!valid) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> TempWorkspace<DType> GetWorkspace(const index_t num_batch, const index_t num_elem, const int width_elem, const index_t topk, const OpContext& ctx) { TempWorkspace<DType> workspace; Stream<gpu> *s = ctx.get_stream<gpu>(); const int alignment = 128; // Get the workspace size workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment); workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment); 
WorkspaceForSort(num_elem, topk, alignment, &workspace); // Place for a buffer workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment); workspace.nms_scratch_space = align(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8) * num_batch * topk * sizeof(uint32_t), alignment); const index_t workspace_size = workspace.scores_temp_space + workspace.scratch_space + workspace.nms_scratch_space + workspace.indices_temp_spaces; // Obtain the memory for workspace Tensor<gpu, 1, uint8_t> scratch_memory = ctx.requested[box_nms_enum::kTempSpace] .get_space_typed<gpu, 1, uint8_t>(mshadow::Shape1(workspace_size), s); // Populate workspace pointers workspace.scores = reinterpret_cast<DType*>(scratch_memory.dptr_); workspace.scratch = reinterpret_cast<uint8_t*>(workspace.scores) + workspace.scores_temp_space; workspace.buffer = reinterpret_cast<DType*>(workspace.scratch + workspace.scratch_space); workspace.nms_scratch = reinterpret_cast<uint32_t*>( reinterpret_cast<uint8_t*>(workspace.buffer) + workspace.buffer_space); workspace.indices = reinterpret_cast<index_t*>( reinterpret_cast<uint8_t*>(workspace.nms_scratch) + workspace.nms_scratch_space); return workspace; } template <typename DType> __global__ void ExtractScoresKernel(const DType* data, DType* scores, const index_t N, const int element_width, const int score_index) { const index_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) { scores[tid] = data[tid * element_width + score_index]; } } template <typename DType> void CompactNMSResults(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, Tensor<gpu, 1, index_t>* indices, Tensor<gpu, 1, DType>* scores, Tensor<gpu, 1, index_t>* sorted_indices, Tensor<gpu, 1, DType>* sorted_scores, Tensor<gpu, 1, char>* scratch, const int score_index, const index_t topk, Stream<gpu>* s) { using mshadow::Shape1; constexpr int n_threads = 512; const index_t num_elements = scores->shape_.Size(); const index_t num_elements_per_batch = data.shape_[1]; const index_t num_batches = data.shape_[0]; const int element_width = data.shape_[2]; const index_t n_blocks = ceil_div(num_elements, n_threads); ExtractScoresKernel<<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data.dptr_, scores->dptr_, num_elements, element_width, score_index); *indices = mshadow::expr::range<index_t>(0, num_elements); for (index_t i = 0; i < num_batches; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> indices_batch(indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<true>(*sorted_indices, data, out, topk, score_index, s); } } // namespace void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using mshadow::Shape1; using mshadow::Shape2; using mshadow::Shape3; CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo"; CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place computation"; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: 
[output, temp]"; const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed); Stream<gpu> *s = ctx.get_stream<gpu>(); mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_; int indim = in_shape.ndim(); int num_batch = indim <= 2? 1 : in_shape.ProdShape(0, indim - 2); int num_elem = in_shape[indim - 2]; int width_elem = in_shape[indim - 1]; MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<gpu, 3, DType> data = inputs[box_nms_enum::kData] .get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 3, DType> out = outputs[box_nms_enum::kOut] .get_with_shape<gpu, 3, DType>(Shape3(num_batch, num_elem, width_elem), s); // Special case for topk == 0 if (param.topk == 0) { if (req[0] != kNullOp && req[0] != kWriteInplace) { out = mshadow::expr::F<mshadow_op::identity>(data); } return; } index_t topk = param.topk > 0 ? std::min(param.topk, num_elem) : num_elem; const auto& workspace = GetWorkspace<DType>(num_batch, num_elem, width_elem, topk, ctx); FilterAndPrepareAuxData(data, &out, workspace, param, s); Tensor<gpu, 1, DType> scores(workspace.scores, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, DType> sorted_scores(workspace.scores + scores.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> indices(workspace.indices, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> sorted_indices(workspace.indices + indices.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, char> scratch(reinterpret_cast<char*>(workspace.scratch), Shape1(workspace.scratch_space), s); Tensor<gpu, 3, DType> buffer(workspace.buffer, Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 2, uint32_t> nms_scratch(workspace.nms_scratch, Shape2(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8), topk * num_batch), s); indices = mshadow::expr::range<index_t>(0, num_batch * num_elem); for (index_t i = 0; i < num_batch; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> indices_batch(indices.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, DType> sorted_scores_batch(sorted_scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> sorted_indices_batch(sorted_indices.dptr_ + i * num_elem, Shape1(num_elem), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<false>(sorted_indices, out, &buffer, topk, -1, s); NMS<DType> nms; nms(&buffer, &nms_scratch, topk, param, s); CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices, &sorted_scores, &scratch, param.score_index, topk, s); // convert encoding if (param.in_format != param.out_format) { if (box_common_enum::kCenter == param.out_format) { mxnet::op::mxnet_op::Kernel<corner_to_center, gpu>::Launch(s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } else { mxnet::op::mxnet_op::Kernel<center_to_corner, gpu>::Launch(s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } } }); } void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; if (req[1] == kNullOp) { BoxNMSForwardGPU_notemp(attrs, ctx, inputs, req, outputs); return; } BoxNMSForward<gpu>(attrs, 
ctx, inputs, req, outputs); } NNVM_REGISTER_OP(_contrib_box_nms) .set_attr<FCompute>("FCompute<gpu>", BoxNMSForwardGPU); NNVM_REGISTER_OP(_backward_contrib_box_nms) .set_attr<FCompute>("FCompute<gpu>", BoxNMSBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_iou) .set_attr<FCompute>("FCompute<gpu>", BoxOverlapForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_box_iou) .set_attr<FCompute>("FCompute<gpu>", BoxOverlapBackward<gpu>); NNVM_REGISTER_OP(_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_encode) .set_attr<FCompute>("FCompute<gpu>", BoxEncodeForward<gpu>); NNVM_REGISTER_OP(_contrib_box_decode) .set_attr<FCompute>("FCompute<gpu>", BoxDecodeForward<gpu>); } // namespace op } // namespace mxnet
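In the kCenter branch of calculate_intersection above, each box edge is computed at twice its true coordinate (2*cx - w, 2*cx + w, and likewise for y), so the raw overlap extents wx and wy both come out doubled and wy is divided by 4 to compensate before wx * wy is returned. A small hypothetical host-side check of that identity, with made-up box values:

// Assumed standalone check (not from the MXNet sources): the doubled-coordinate
// route used by the kCenter branch reproduces the plain corner-format overlap.
#include <algorithm>
#include <cstdio>

static float center_overlap(float acx, float acy, float aw, float ah,
                            float bcx, float bcy, float bw, float bh) {
  const float left   = std::max(2 * acx - aw, 2 * bcx - bw);  // 2 * overlap left
  const float right  = std::min(2 * acx + aw, 2 * bcx + bw);  // 2 * overlap right
  const float bottom = std::max(2 * acy - ah, 2 * bcy - bh);
  const float top    = std::min(2 * acy + ah, 2 * bcy + bh);
  const float wx = right - left;            // 2x the true overlap width
  const float wy = (top - bottom) / 4.0f;   // /4 compensates for wx and wy both being 2x
  return (wx <= 0 || wy <= 0) ? 0.0f : wx * wy;
}

int main() {
  // Box A: center (1,1), size 2x2 -> corners (0,0)-(2,2)
  // Box B: center (2,1), size 2x2 -> corners (1,0)-(3,2)
  // Corner-format overlap: width 1 * height 2 = 2
  std::printf("center-format overlap = %g (expected 2)\n",
              center_overlap(1, 1, 2, 2, 2, 1, 2, 2));
  return 0;
}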
e2fb829179d14dea2dc7532e5c2f4f1962f58d17.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* Truncates each data[idx] to precision[idx] decimal digits: scale by 10^precision, drop the fractional part via the int cast, then scale back down. */ __global__ void _setPrecisionKernel(float* data, size_t size, int* precision) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; int prec = precision[idx]; int mul = 1; while(prec--) mul *= 10; data[idx] = (float)(int)(data[idx]*mul); data[idx] /= mul; }
e2fb829179d14dea2dc7532e5c2f4f1962f58d17.cu
#include "includes.h" __global__ void _setPrecisionKernel(float* data, size_t size, int* precision) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; int prec = precision[idx]; int mul = 1; while(prec--) mul *= 10; data[idx] = (float)(int)(data[idx]*mul); data[idx] /= mul; }
32fe114267a48273f74243d809c6afe1c6f07717.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** ************************************************************************** */ #include "Common.h" #define SCALEOUTPUT #define GETLIMITS #define SCALE #define OUTPUT //include Kernels #include "laplacian_kernel.cu" void WrapperCUDA_pixME(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size) { //allocate device memory float *Src; float *Dst; size_t DstStride; /*Allocate memory in the Device */ hipMallocPitch((void **)(&Src), &DstStride, Size.width * sizeof(float), Size.height); hipMemset2D((void *)(Src), DstStride,0, Size.width * sizeof(float), Size.height); hipMallocPitch((void **)(&Dst), &DstStride, Size.width * sizeof(float), Size.height); hipMemset2D((void *)(Dst), DstStride,0, Size.width * sizeof(float), Size.height); DstStride /= sizeof(float); //convert source image to float representation int ImgSrcFStride; float *ImgSrcF = MallocPlaneFloat(Size.width, Size.height, &ImgSrcFStride); CopyByte2Float(ImgSrc, Stride, ImgSrcF, ImgSrcFStride, Size); //Copy from host memory to device hipMemcpy2D(Src, ImgSrcFStride * sizeof(float), ImgSrcF, ImgSrcFStride * sizeof(float), Size.width * sizeof(float), Size.height,hipMemcpyHostToDevice); //setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE); // TODO: the algorithm should be as follows: // (1) find the edges with the laplacian kernel = dI // (2) baseline correct and scale the laplacian "edges" image to 0,255 = dI' // (3) subtract the corrected edges from the image -- I' = I-dI' // (4) correct and scale image = I-sharpened //execute CUDA kernel hipLaunchKernelGGL(( CUDAkernel_Laplacian), dim3(grid), dim3(threads) , 0, 0, Src,Dst, (int) DstStride, (int) Size.height); // CUDAkernel_getLimits<<< grid, threads >>>(Dst, (int) DstStride, (int*) max, (int*) min); // find the minimum intensity in the image /* TODO: this would be a good place to do the scaling and baseline correction */ //Copy image block to host hipMemcpy2D(ImgSrcF, ImgSrcFStride * sizeof(float), Dst, DstStride * sizeof(float), Size.width * sizeof(float), Size.height, hipMemcpyDeviceToHost); //Convert image back to byte representation CopyFloat2Byte(ImgSrcF, ImgSrcFStride, ImgDst, Stride, Size); hipFree(Dst); hipFree(Src); FreePlane(ImgSrcF); } /************************************************************************** * Program entry point */ int main(int argc, char** argv) { //initialize CUDA //source and results image filenames char SampleImageFnameResCUDA1[] = "laplacian_frame1.bmp"; char *pSampleImageFpath ="data/frame1.bmp"; //preload image (acquire dimensions) int ImgWidth, ImgHeight; ROI ImgSize; int res = PreLoadBmp(pSampleImageFpath, &ImgWidth, &ImgHeight); ImgSize.width = ImgWidth; ImgSize.height = ImgHeight; #ifdef SCALEOUTPUT int i, j, index, numPixels; byte min=0, max=0, range, pixel, value; #endif //CONSOLE INFORMATION: saying hello to user printf("CUDA Image Sharpning \n"); printf("===================================\n"); printf("Loading test image: %s... ", pSampleImageFpath); if (res) { printf("\nError: Image file not found or invalid!\n"); printf("Press ENTER to exit...\n"); getchar(); //finalize exit(0); } //check image dimensions are multiples of BLOCK_SIZE if (ImgWidth % BLOCK_SIZE != 0 || ImgHeight % BLOCK_SIZE != 0) { printf("\nError: Input image dimensions must be multiples of 8!\n"); printf("Press ENTER to exit...\n"); getchar(); //finalize exit(0); } printf("[%d x %d]... 
", ImgWidth, ImgHeight); /**********************************************************************/ //allocate image buffers int ImgStride; byte *ImgSrc = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride); byte *ImgDstCUDA1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride); //load sample image LoadBmpAsGray(pSampleImageFpath, ImgStride, ImgSize, ImgSrc); /****** // RUNNING WRAPPERS************************************************/ printf("Success\nRunning CUDA 1 (GPU) version... "); WrapperCUDA_pixME(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize); /*********************************************************************************/ #ifdef DEBUG printf("ImgWidth,%d, ImgHeight,%d\n", ImgWidth, ImgHeight); #endif #ifdef SCALEOUTPUT #ifdef GETLIMITS numPixels = ImgWidth * ImgHeight; /* determine min and max of image - should be with a kernel */ for(i = 0; i < numPixels; i++){ pixel = ImgDstCUDA1[i]; /* get pixel */ if(pixel < min) min = pixel; else if(pixel > max) max = pixel; } printf("\nMax,%d ,Min,%d\n",max,min); #ifdef SCALE /* baseline correct and scale image - should be with a kernel*/ range = max - min; for(index = 0; index < numPixels; index++){ #ifdef DEBUG printf("index,%d\n",index); #endif pixel = ImgDstCUDA1[index]; value = (pixel - min) * 255 / max; #ifdef VERBOSE printf("old,%d, new,%d, range,%d\n",pixel,value,range); #endif ImgDstCUDA1[index] = value; } #endif /* SCALE */ #endif /* GETLIMITS */ #endif /* SCALEOUTPUT */ #ifdef OUTPUT //dump result of CUDA 1 processing printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA1); DumpBmpAsGray(SampleImageFnameResCUDA1, ImgDstCUDA1, ImgStride, ImgSize); #endif /* output */ //print speed info printf("Success\n"); //release byte planes FreePlane(ImgSrc); //finalize return 0; }
32fe114267a48273f74243d809c6afe1c6f07717.cu
/** ************************************************************************** */ #include "Common.h" #define SCALEOUTPUT #define GETLIMITS #define SCALE #define OUTPUT //include Kernels #include "laplacian_kernel.cu" void WrapperCUDA_pixME(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size) { //allocate device memory float *Src; float *Dst; size_t DstStride; /*Allocate memory in the Device */ cudaMallocPitch((void **)(&Src), &DstStride, Size.width * sizeof(float), Size.height); cudaMemset2D((void *)(Src), DstStride,0, Size.width * sizeof(float), Size.height); cudaMallocPitch((void **)(&Dst), &DstStride, Size.width * sizeof(float), Size.height); cudaMemset2D((void *)(Dst), DstStride,0, Size.width * sizeof(float), Size.height); DstStride /= sizeof(float); //convert source image to float representation int ImgSrcFStride; float *ImgSrcF = MallocPlaneFloat(Size.width, Size.height, &ImgSrcFStride); CopyByte2Float(ImgSrc, Stride, ImgSrcF, ImgSrcFStride, Size); //Copy from host memory to device cudaMemcpy2D(Src, ImgSrcFStride * sizeof(float), ImgSrcF, ImgSrcFStride * sizeof(float), Size.width * sizeof(float), Size.height,cudaMemcpyHostToDevice); //setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE); // TODO: the algorithm should be as follows: // (1) find the edges with the laplacian kernel = dI // (2) baseline correct and scale the laplacian "edges" image to 0,255 = dI' // (3) subtract the corrected edges from the image -- I' = I-dI' // (4) correct and scale image = I-sharpened //execute CUDA kernel CUDAkernel_Laplacian<<< grid, threads >>>(Src,Dst, (int) DstStride, (int) Size.height); // CUDAkernel_getLimits<<< grid, threads >>>(Dst, (int) DstStride, (int*) max, (int*) min); // find the minimum intensity in the image /* TODO: this would be a good place to do the scaling and baseline correction */ //Copy image block to host cudaMemcpy2D(ImgSrcF, ImgSrcFStride * sizeof(float), Dst, DstStride * sizeof(float), Size.width * sizeof(float), Size.height, cudaMemcpyDeviceToHost); //Convert image back to byte representation CopyFloat2Byte(ImgSrcF, ImgSrcFStride, ImgDst, Stride, Size); cudaFree(Dst); cudaFree(Src); FreePlane(ImgSrcF); } /************************************************************************** * Program entry point */ int main(int argc, char** argv) { //initialize CUDA //source and results image filenames char SampleImageFnameResCUDA1[] = "laplacian_frame1.bmp"; char *pSampleImageFpath ="data/frame1.bmp"; //preload image (acquire dimensions) int ImgWidth, ImgHeight; ROI ImgSize; int res = PreLoadBmp(pSampleImageFpath, &ImgWidth, &ImgHeight); ImgSize.width = ImgWidth; ImgSize.height = ImgHeight; #ifdef SCALEOUTPUT int i, j, index, numPixels; byte min=0, max=0, range, pixel, value; #endif //CONSOLE INFORMATION: saying hello to user printf("CUDA Image Sharpning \n"); printf("===================================\n"); printf("Loading test image: %s... ", pSampleImageFpath); if (res) { printf("\nError: Image file not found or invalid!\n"); printf("Press ENTER to exit...\n"); getchar(); //finalize exit(0); } //check image dimensions are multiples of BLOCK_SIZE if (ImgWidth % BLOCK_SIZE != 0 || ImgHeight % BLOCK_SIZE != 0) { printf("\nError: Input image dimensions must be multiples of 8!\n"); printf("Press ENTER to exit...\n"); getchar(); //finalize exit(0); } printf("[%d x %d]... 
", ImgWidth, ImgHeight); /**********************************************************************/ //allocate image buffers int ImgStride; byte *ImgSrc = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride); byte *ImgDstCUDA1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride); //load sample image LoadBmpAsGray(pSampleImageFpath, ImgStride, ImgSize, ImgSrc); /****** // RUNNING WRAPPERS************************************************/ printf("Success\nRunning CUDA 1 (GPU) version... "); WrapperCUDA_pixME(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize); /*********************************************************************************/ #ifdef DEBUG printf("ImgWidth,%d, ImgHeight,%d\n", ImgWidth, ImgHeight); #endif #ifdef SCALEOUTPUT #ifdef GETLIMITS numPixels = ImgWidth * ImgHeight; /* determine min and max of image - should be with a kernel */ for(i = 0; i < numPixels; i++){ pixel = ImgDstCUDA1[i]; /* get pixel */ if(pixel < min) min = pixel; else if(pixel > max) max = pixel; } printf("\nMax,%d ,Min,%d\n",max,min); #ifdef SCALE /* baseline correct and scale image - should be with a kernel*/ range = max - min; for(index = 0; index < numPixels; index++){ #ifdef DEBUG printf("index,%d\n",index); #endif pixel = ImgDstCUDA1[index]; value = (pixel - min) * 255 / max; #ifdef VERBOSE printf("old,%d, new,%d, range,%d\n",pixel,value,range); #endif ImgDstCUDA1[index] = value; } #endif /* SCALE */ #endif /* GETLIMITS */ #endif /* SCALEOUTPUT */ #ifdef OUTPUT //dump result of CUDA 1 processing printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA1); DumpBmpAsGray(SampleImageFnameResCUDA1, ImgDstCUDA1, ImgStride, ImgSize); #endif /* output */ //print speed info printf("Success\n"); //release byte planes FreePlane(ImgSrc); //finalize return 0; }
2a96d5c137ac050b64dffac3cb1ab1cda8b573fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include "matvec.h" #include "struct.h" #include "constants.h" __global__ void frameNmlKernel(double *d_nmlMat, double *d_lmkMat, int *d_nmlVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector nmlSumVec = {0.0, 0.0, 0.0}; for ( int nmlIdx = 0; nmlIdx < NMLNUM; ++nmlIdx ) { int q0Idx = d_nmlVtxMat[(3 * nmlIdx ) * elmNum + elmIdx]; int q1Idx = d_nmlVtxMat[(3 * nmlIdx + 1) * elmNum + elmIdx]; int q2Idx = d_nmlVtxMat[(3 * nmlIdx + 2) * elmNum + elmIdx]; vector q0Vec, q1Vec, q2Vec; getVector(q0Vec, d_lmkMat, q0Idx, lmkNum); getVector(q1Vec, d_lmkMat, q1Idx, lmkNum); getVector(q2Vec, d_lmkMat, q2Idx, lmkNum); vector q10Vec, q20Vec; vectorSubtract(q10Vec, q1Vec, q0Vec); vectorSubtract(q20Vec, q2Vec, q0Vec); vector crsVec; crossProduct(crsVec, q10Vec, q20Vec); double crsLen = eucnorm(crsVec); nmlSumVec.x += crsVec.x / crsLen; nmlSumVec.y += crsVec.y / crsLen; nmlSumVec.z += crsVec.z / crsLen; } double nmlLen = eucnorm(nmlSumVec); d_nmlMat[ elmIdx] = nmlSumVec.x / nmlLen; d_nmlMat[ elmNum + elmIdx] = nmlSumVec.y / nmlLen; d_nmlMat[2 * elmNum + elmIdx] = nmlSumVec.z / nmlLen; } return; } __global__ void frameTsvKernel(double *d_tsvMat, double *d_lmkMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector tsvSumVec = {0.0, 0.0, 0.0}; for ( int tsvIdx = 0; tsvIdx < TSVNUM; ++tsvIdx ) { int dwnIdx = d_tsvVtxMat[(2 * tsvIdx ) * elmNum + elmIdx]; int uppIdx = d_tsvVtxMat[(2 * tsvIdx + 1) * elmNum + elmIdx]; vector dwnVec, uppVec; getVector(dwnVec, d_lmkMat, dwnIdx, lmkNum); getVector(uppVec, d_lmkMat, uppIdx, lmkNum); vector difVec; vectorSubtract(difVec, uppVec, dwnVec); double difLen = eucnorm(difVec); tsvSumVec.x += difVec.x / difLen; tsvSumVec.y += difVec.y / difLen; tsvSumVec.z += difVec.z / difLen; } double tsvLen = eucnorm(tsvSumVec); d_tsvMat[ elmIdx] = tsvSumVec.x / tsvLen; d_tsvMat[ elmNum + elmIdx] = tsvSumVec.y / tsvLen; d_tsvMat[2 * elmNum + elmIdx] = tsvSumVec.z / tsvLen; } return; } void computeFrame(double *d_nmlMat, double *d_tsvMat, double *d_lmkMat, int *d_nmlVtxMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int blkNum = (elmNum - 1) / BLKDIM + 1; hipLaunchKernelGGL(( frameNmlKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_nmlMat, d_lmkMat, d_nmlVtxMat, lmkNum, elmNum); hipLaunchKernelGGL(( frameTsvKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_tsvMat, d_lmkMat, d_tsvVtxMat, lmkNum, elmNum); return; } // --- __global__ void frameNmlKernel(double *d_nmlMat, double *d_nmlLenVec, double *d_lmkMat, int *d_nmlVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector nmlSumVec = {0.0, 0.0, 0.0}; for ( int nmlIdx = 0; nmlIdx < NMLNUM; ++nmlIdx ) { int q0Idx = d_nmlVtxMat[(3 * nmlIdx ) * elmNum + elmIdx]; int q1Idx = d_nmlVtxMat[(3 * nmlIdx + 1) * elmNum + elmIdx]; int q2Idx = d_nmlVtxMat[(3 * nmlIdx + 2) * elmNum + elmIdx]; vector q0Vec, q1Vec, q2Vec; getVector(q0Vec, d_lmkMat, q0Idx, lmkNum); getVector(q1Vec, d_lmkMat, q1Idx, lmkNum); getVector(q2Vec, d_lmkMat, q2Idx, lmkNum); vector q10Vec, q20Vec; vectorSubtract(q10Vec, q1Vec, q0Vec); vectorSubtract(q20Vec, q2Vec, q0Vec); vector crsVec; crossProduct(crsVec, q10Vec, q20Vec); double crsLen = eucnorm(crsVec); nmlSumVec.x += crsVec.x / crsLen; nmlSumVec.y += crsVec.y / crsLen; nmlSumVec.z += crsVec.z / crsLen; } double nmlLen = 
eucnorm(nmlSumVec); d_nmlLenVec[elmIdx] = nmlLen; d_nmlMat[ elmIdx] = nmlSumVec.x / nmlLen; d_nmlMat[ elmNum + elmIdx] = nmlSumVec.y / nmlLen; d_nmlMat[2 * elmNum + elmIdx] = nmlSumVec.z / nmlLen; } return; } __global__ void frameTsvKernel(double *d_tsvMat, double *d_tsvLenVec, double *d_lmkMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector tsvSumVec = {0.0, 0.0, 0.0}; for ( int tsvIdx = 0; tsvIdx < TSVNUM; ++tsvIdx ) { int dwnIdx = d_tsvVtxMat[(2 * tsvIdx ) * elmNum + elmIdx]; int uppIdx = d_tsvVtxMat[(2 * tsvIdx + 1) * elmNum + elmIdx]; vector dwnVec, uppVec; getVector(dwnVec, d_lmkMat, dwnIdx, lmkNum); getVector(uppVec, d_lmkMat, uppIdx, lmkNum); vector difVec; vectorSubtract(difVec, uppVec, dwnVec); double difLen = eucnorm(difVec); tsvSumVec.x += difVec.x / difLen; tsvSumVec.y += difVec.y / difLen; tsvSumVec.z += difVec.z / difLen; } double tsvLen = eucnorm(tsvSumVec); d_tsvLenVec[elmIdx] = tsvLen; d_tsvMat[ elmIdx] = tsvSumVec.x / tsvLen; d_tsvMat[ elmNum + elmIdx] = tsvSumVec.y / tsvLen; d_tsvMat[2 * elmNum + elmIdx] = tsvSumVec.z / tsvLen; } return; } void computeFrame(double *d_nmlMat, double *d_tsvMat, double *d_nmlLenVec, double *d_tsvLenVec, double *d_lmkMat, int *d_nmlVtxMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int blkNum = (elmNum - 1) / BLKDIM + 1; hipLaunchKernelGGL(( frameNmlKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_nmlMat, d_nmlLenVec, d_lmkMat, d_nmlVtxMat, lmkNum, elmNum); hipLaunchKernelGGL(( frameTsvKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_tsvMat, d_tsvLenVec, d_lmkMat, d_tsvVtxMat, lmkNum, elmNum); return; }
2a96d5c137ac050b64dffac3cb1ab1cda8b573fb.cu
#include <cmath> #include "matvec.h" #include "struct.h" #include "constants.h" __global__ void frameNmlKernel(double *d_nmlMat, double *d_lmkMat, int *d_nmlVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector nmlSumVec = {0.0, 0.0, 0.0}; for ( int nmlIdx = 0; nmlIdx < NMLNUM; ++nmlIdx ) { int q0Idx = d_nmlVtxMat[(3 * nmlIdx ) * elmNum + elmIdx]; int q1Idx = d_nmlVtxMat[(3 * nmlIdx + 1) * elmNum + elmIdx]; int q2Idx = d_nmlVtxMat[(3 * nmlIdx + 2) * elmNum + elmIdx]; vector q0Vec, q1Vec, q2Vec; getVector(q0Vec, d_lmkMat, q0Idx, lmkNum); getVector(q1Vec, d_lmkMat, q1Idx, lmkNum); getVector(q2Vec, d_lmkMat, q2Idx, lmkNum); vector q10Vec, q20Vec; vectorSubtract(q10Vec, q1Vec, q0Vec); vectorSubtract(q20Vec, q2Vec, q0Vec); vector crsVec; crossProduct(crsVec, q10Vec, q20Vec); double crsLen = eucnorm(crsVec); nmlSumVec.x += crsVec.x / crsLen; nmlSumVec.y += crsVec.y / crsLen; nmlSumVec.z += crsVec.z / crsLen; } double nmlLen = eucnorm(nmlSumVec); d_nmlMat[ elmIdx] = nmlSumVec.x / nmlLen; d_nmlMat[ elmNum + elmIdx] = nmlSumVec.y / nmlLen; d_nmlMat[2 * elmNum + elmIdx] = nmlSumVec.z / nmlLen; } return; } __global__ void frameTsvKernel(double *d_tsvMat, double *d_lmkMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector tsvSumVec = {0.0, 0.0, 0.0}; for ( int tsvIdx = 0; tsvIdx < TSVNUM; ++tsvIdx ) { int dwnIdx = d_tsvVtxMat[(2 * tsvIdx ) * elmNum + elmIdx]; int uppIdx = d_tsvVtxMat[(2 * tsvIdx + 1) * elmNum + elmIdx]; vector dwnVec, uppVec; getVector(dwnVec, d_lmkMat, dwnIdx, lmkNum); getVector(uppVec, d_lmkMat, uppIdx, lmkNum); vector difVec; vectorSubtract(difVec, uppVec, dwnVec); double difLen = eucnorm(difVec); tsvSumVec.x += difVec.x / difLen; tsvSumVec.y += difVec.y / difLen; tsvSumVec.z += difVec.z / difLen; } double tsvLen = eucnorm(tsvSumVec); d_tsvMat[ elmIdx] = tsvSumVec.x / tsvLen; d_tsvMat[ elmNum + elmIdx] = tsvSumVec.y / tsvLen; d_tsvMat[2 * elmNum + elmIdx] = tsvSumVec.z / tsvLen; } return; } void computeFrame(double *d_nmlMat, double *d_tsvMat, double *d_lmkMat, int *d_nmlVtxMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int blkNum = (elmNum - 1) / BLKDIM + 1; frameNmlKernel <<<blkNum, BLKDIM>>> (d_nmlMat, d_lmkMat, d_nmlVtxMat, lmkNum, elmNum); frameTsvKernel <<<blkNum, BLKDIM>>> (d_tsvMat, d_lmkMat, d_tsvVtxMat, lmkNum, elmNum); return; } // --- __global__ void frameNmlKernel(double *d_nmlMat, double *d_nmlLenVec, double *d_lmkMat, int *d_nmlVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector nmlSumVec = {0.0, 0.0, 0.0}; for ( int nmlIdx = 0; nmlIdx < NMLNUM; ++nmlIdx ) { int q0Idx = d_nmlVtxMat[(3 * nmlIdx ) * elmNum + elmIdx]; int q1Idx = d_nmlVtxMat[(3 * nmlIdx + 1) * elmNum + elmIdx]; int q2Idx = d_nmlVtxMat[(3 * nmlIdx + 2) * elmNum + elmIdx]; vector q0Vec, q1Vec, q2Vec; getVector(q0Vec, d_lmkMat, q0Idx, lmkNum); getVector(q1Vec, d_lmkMat, q1Idx, lmkNum); getVector(q2Vec, d_lmkMat, q2Idx, lmkNum); vector q10Vec, q20Vec; vectorSubtract(q10Vec, q1Vec, q0Vec); vectorSubtract(q20Vec, q2Vec, q0Vec); vector crsVec; crossProduct(crsVec, q10Vec, q20Vec); double crsLen = eucnorm(crsVec); nmlSumVec.x += crsVec.x / crsLen; nmlSumVec.y += crsVec.y / crsLen; nmlSumVec.z += crsVec.z / crsLen; } double nmlLen = eucnorm(nmlSumVec); d_nmlLenVec[elmIdx] = nmlLen; d_nmlMat[ elmIdx] = nmlSumVec.x / nmlLen; d_nmlMat[ elmNum + elmIdx] = nmlSumVec.y / nmlLen; d_nmlMat[2 * elmNum + elmIdx] = 
nmlSumVec.z / nmlLen; } return; } __global__ void frameTsvKernel(double *d_tsvMat, double *d_tsvLenVec, double *d_lmkMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int elmIdx = blockIdx.x * blockDim.x + threadIdx.x; if ( elmIdx < elmNum ) { vector tsvSumVec = {0.0, 0.0, 0.0}; for ( int tsvIdx = 0; tsvIdx < TSVNUM; ++tsvIdx ) { int dwnIdx = d_tsvVtxMat[(2 * tsvIdx ) * elmNum + elmIdx]; int uppIdx = d_tsvVtxMat[(2 * tsvIdx + 1) * elmNum + elmIdx]; vector dwnVec, uppVec; getVector(dwnVec, d_lmkMat, dwnIdx, lmkNum); getVector(uppVec, d_lmkMat, uppIdx, lmkNum); vector difVec; vectorSubtract(difVec, uppVec, dwnVec); double difLen = eucnorm(difVec); tsvSumVec.x += difVec.x / difLen; tsvSumVec.y += difVec.y / difLen; tsvSumVec.z += difVec.z / difLen; } double tsvLen = eucnorm(tsvSumVec); d_tsvLenVec[elmIdx] = tsvLen; d_tsvMat[ elmIdx] = tsvSumVec.x / tsvLen; d_tsvMat[ elmNum + elmIdx] = tsvSumVec.y / tsvLen; d_tsvMat[2 * elmNum + elmIdx] = tsvSumVec.z / tsvLen; } return; } void computeFrame(double *d_nmlMat, double *d_tsvMat, double *d_nmlLenVec, double *d_tsvLenVec, double *d_lmkMat, int *d_nmlVtxMat, int *d_tsvVtxMat, int lmkNum, int elmNum) { int blkNum = (elmNum - 1) / BLKDIM + 1; frameNmlKernel <<<blkNum, BLKDIM>>> (d_nmlMat, d_nmlLenVec, d_lmkMat, d_nmlVtxMat, lmkNum, elmNum); frameTsvKernel <<<blkNum, BLKDIM>>> (d_tsvMat, d_tsvLenVec, d_lmkMat, d_tsvVtxMat, lmkNum, elmNum); return; }
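// The frame kernels above rely on a small vector library from "matvec.h"/"struct.h" that the
// record does not include. The definitions below are a sketch of what those helpers plausibly
// look like, written only to document the structure-of-arrays layout the kernels assume
// (component c of element i stored at c * num + i); the real headers in the source project may
// differ.
#include <cmath>

struct vector_sketch { double x, y, z; };

__host__ __device__ inline void getVector(vector_sketch &v, const double *mat, int idx, int num)
{
    v.x = mat[idx];                 // x block
    v.y = mat[num + idx];           // y block
    v.z = mat[2 * num + idx];       // z block
}

__host__ __device__ inline void vectorSubtract(vector_sketch &out,
                                               const vector_sketch &a, const vector_sketch &b)
{
    out.x = a.x - b.x; out.y = a.y - b.y; out.z = a.z - b.z;
}

__host__ __device__ inline void crossProduct(vector_sketch &out,
                                             const vector_sketch &a, const vector_sketch &b)
{
    out.x = a.y * b.z - a.z * b.y;
    out.y = a.z * b.x - a.x * b.z;
    out.z = a.x * b.y - a.y * b.x;
}

__host__ __device__ inline double eucnorm(const vector_sketch &v)
{
    return sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
}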
8e0658a508b4734b8a85f6f9c2f61c900a790ce9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) __global__ void sumReduction(float * input, float * output, int len) { __shared__ float pSum[2 * BLOCK_SIZE]; unsigned int i = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE; if (start + i < len) pSum[i] = input[start + i]; else pSum[i] = 0; if (start + BLOCK_SIZE + i < len) pSum[BLOCK_SIZE + i] = input[start + BLOCK_SIZE + i]; else pSum[BLOCK_SIZE + i] = 0; for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (i < stride) pSum[i] += pSum[i+stride]; } if (i == 0) output[blockIdx.x] = pSum[0]; } int main(int argc, char ** argv) { int ii; wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numInputElements; // number of elements in the input list int numOutputElements; // number of elements in the output list numInputElements = 9999; hostInput = (float*) malloc(numInputElements * sizeof(float)); float answer = 0.0; for(int i = 0; i < numInputElements; i++) { hostInput[i] = i; answer += hostInput[i]; } printf("Expected answer %f\n", answer); // args = wbArg_read(argc, argv); //hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements); numOutputElements = numInputElements / (BLOCK_SIZE<<1); if (numInputElements % (BLOCK_SIZE<<1)) { numOutputElements++; } hostOutput = (float*) malloc(numOutputElements * sizeof(float)); //wbLog(TRACE, "The number of input elements in the input is ", numInputElements); //wbLog(TRACE, "The number of output elements in the input is ", numOutputElements); //@@ Allocate GPU memory here hipMalloc(&deviceInput, sizeof(float) * numInputElements); hipMalloc(&deviceOutput, sizeof(float) * numOutputElements); //@@ Copy memory to the GPU here hipMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, hipMemcpyHostToDevice); //@@ Initialize the grid and block dimensions here dim3 dimGrid(numOutputElements, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( sumReduction), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceOutput, numInputElements); hipDeviceSynchronize(); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, hipMemcpyDeviceToHost); /******************************************************************** * Reduce output vector on the host * NOTE: One could also perform the reduction of the output vector * recursively and support any size input. For simplicity, we do not * require that for this lab. ********************************************************************/ for (ii = 1; ii < numOutputElements; ii++) { hostOutput[0] += hostOutput[ii]; } printf("Computed answer %f\n", hostOutput[0]); //@@ Free the GPU memory here hipFree(deviceInput); hipFree(deviceOutput); free(hostInput); free(hostOutput); return 0; }
8e0658a508b4734b8a85f6f9c2f61c900a790ce9.cu
#define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) __global__ void sumReduction(float * input, float * output, int len) { __shared__ float pSum[2 * BLOCK_SIZE]; unsigned int i = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE; if (start + i < len) pSum[i] = input[start + i]; else pSum[i] = 0; if (start + BLOCK_SIZE + i < len) pSum[BLOCK_SIZE + i] = input[start + BLOCK_SIZE + i]; else pSum[BLOCK_SIZE + i] = 0; for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (i < stride) pSum[i] += pSum[i+stride]; } if (i == 0) output[blockIdx.x] = pSum[0]; } int main(int argc, char ** argv) { int ii; wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numInputElements; // number of elements in the input list int numOutputElements; // number of elements in the output list numInputElements = 9999; hostInput = (float*) malloc(numInputElements * sizeof(float)); float answer = 0.0; for(int i = 0; i < numInputElements; i++) { hostInput[i] = i; answer += hostInput[i]; } printf("Expected answer %f\n", answer); // args = wbArg_read(argc, argv); //hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numInputElements); numOutputElements = numInputElements / (BLOCK_SIZE<<1); if (numInputElements % (BLOCK_SIZE<<1)) { numOutputElements++; } hostOutput = (float*) malloc(numOutputElements * sizeof(float)); //wbLog(TRACE, "The number of input elements in the input is ", numInputElements); //wbLog(TRACE, "The number of output elements in the input is ", numOutputElements); //@@ Allocate GPU memory here cudaMalloc(&deviceInput, sizeof(float) * numInputElements); cudaMalloc(&deviceOutput, sizeof(float) * numOutputElements); //@@ Copy memory to the GPU here cudaMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, cudaMemcpyHostToDevice); //@@ Initialize the grid and block dimensions here dim3 dimGrid(numOutputElements, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); //@@ Launch the GPU Kernel here sumReduction<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements); cudaDeviceSynchronize(); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, cudaMemcpyDeviceToHost); /******************************************************************** * Reduce output vector on the host * NOTE: One could also perform the reduction of the output vector * recursively and support any size input. For simplicity, we do not * require that for this lab. ********************************************************************/ for (ii = 1; ii < numOutputElements; ii++) { hostOutput[0] += hostOutput[ii]; } printf("Computed answer %f\n", hostOutput[0]); //@@ Free the GPU memory here cudaFree(deviceInput); cudaFree(deviceOutput); free(hostInput); free(hostOutput); return 0; }
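// A minimal sketch, not from the record above, of the device-side recursive reduction that the
// sample's NOTE mentions as an alternative to summing the per-block partial sums on the host.
// It assumes it is compiled in the same translation unit as the sample, so sumReduction and
// BLOCK_SIZE refer to the definitions above; d_tmp must hold at least
// ceil(len / (2 * BLOCK_SIZE)) floats, and the contents of both buffers are overwritten.
void reduceOnDevice(float *d_in, float *d_tmp, int len, float *h_result)
{
    while (len > 1) {
        int blocks = (len + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
        sumReduction<<<blocks, BLOCK_SIZE>>>(d_in, d_tmp, len);
        cudaDeviceSynchronize();
        // The partial sums written this pass become the input of the next pass.
        float *swap = d_in; d_in = d_tmp; d_tmp = swap;
        len = blocks;
    }
    cudaMemcpy(h_result, d_in, sizeof(float), cudaMemcpyDeviceToHost);
}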
c84900e08cb596edbe573974eb7f822df6d15022.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "callOperation.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int x = 1; int *res = NULL; hipMalloc(&res, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( callOperation), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,x,res,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( callOperation), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,x,res,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( callOperation), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,x,res,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c84900e08cb596edbe573974eb7f822df6d15022.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "callOperation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int x = 1; int *res = NULL; cudaMalloc(&res, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); callOperation<<<gridBlock,threadBlock>>>(a,b,x,res,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { callOperation<<<gridBlock,threadBlock>>>(a,b,x,res,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { callOperation<<<gridBlock,threadBlock>>>(a,b,x,res,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
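// A sketch, not from the record above, of event-based timing for the same launch. The harness
// above reads steady_clock before and after 1000 launches without synchronizing, so for short
// kernels it mostly measures asynchronous launch overhead (and its cudaMalloc calls size the
// buffers in elements rather than bytes). callOperation and its argument list are taken from
// the sample; the helper itself is mine.
float timeKernelMs(dim3 grid, dim3 block, int *a, int *b, int x, int *res, int n, int iters)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        callOperation<<<grid, block>>>(a, b, x, res, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait until all timed launches have finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iters;                   // average time per launch, in milliseconds
}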
09cfa6f9d9681213d8b12f43f152e03461a47ca0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "mse_loss_op.h" namespace caffe2 { namespace { template <typename T> __global__ void MSEKernel( const int n, const T* in, T* out, T beta) { // f(x) = x^2 / beta CUDA_1D_KERNEL_LOOP(index, n) { T val = in[index]; T abs_val = abs(val); out[index] = val * val / beta; } } template <typename T> __global__ void MSEGradientKernel( const int n, const T* in, T* out, const T* d_loss_data, T norm, T beta) { // f'(x) = 2 * x / beta // We also scale by norm * d_loss in this kernel for convenience CUDA_1D_KERNEL_LOOP(index, n) { T val = in[index]; T abs_val = abs(val); T d_loss = *d_loss_data; out[index] = 2 * norm * d_loss * val / beta; } } } // namespace template<> bool MSELossOp<float, CUDAContext>::RunOnDevice() { auto& Y_hat = Input(0); auto& Y = Input(1); auto& alpha_in = Input(2); auto& alpha_out = Input(3); auto* avg_loss = Output(0); int N = Y.dim32(0); // Require the same number of elements along axis 0 (batch size), but // otherwise don't care about the shape (just the number of elements) CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0), "Y_hat and Y must have the same number of elements along axis 0"); CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(), "Y_hat and Y must have the same number of elements"); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size()); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size()); avg_loss->Resize(vector<TIndex>()); buff_.ResizeLike(Y); // Difference // d := y_hat - y math::Sub<float, CUDAContext>( Y.size(), Y_hat.data<float>(), Y.data<float>(), buff_.mutable_data<float>(), &context_); // Element-wise weighted difference (can be used to ignore or reweight // specific components) // d := alpha_in * (y_hat - y) math::Mul<float, CUDAContext>( buff_.size(), buff_.data<float>(), alpha_in.data<float>(), buff_.mutable_data<float>(), &context_); // Element-wise MSE loss // l := MSE(alpha_in * (y_hat - y)) hipLaunchKernelGGL(( MSEKernel<float>) , dim3(CAFFE_GET_BLOCKS(buff_.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), buff_.size(), buff_.data<float>(), buff_.mutable_data<float>(), beta_); // Element-wise weighted MSE loss (can be used to specify a per-element // loss weight) // l := alpha_out * MSE(alpha_in * (y_hat - y)) math::Mul<float, CUDAContext>( buff_.size(), buff_.data<float>(), alpha_out.data<float>(), buff_.mutable_data<float>(), &context_); // Sum of all losses // al := sum_i l_i float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( buff_.size(), buff_.data<float>(), avg_loss_data, &context_); // Average of input batch size // al := 1/N * al math::Scale<float, CUDAContext>( 1, scale_ / N, avg_loss_data, avg_loss_data, &context_); return true; } template<> bool MSELossGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y_hat = Input(0); auto& Y = Input(1); auto& alpha_in = Input(2); auto& alpha_out = Input(3); 
auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOuput") auto* d_Y_hat = Output(0); // gradient of net w.r.t. Y_hat ("gradInput") // We intentially don't compute gradients for Y, alpha_{in,out} since they // are not needed (can change in the future if desired) int N = Y.dim32(0); // Require the same number of elements along axis 0 (batch size), but // otherwise don't care about the shape (just the number of elements) CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0), "Y_hat and Y must have the same number of elements along axis 0"); CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(), "Y_hat and Y must have the same number of elements"); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size()); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size()); CAFFE_ENFORCE_EQ(d_avg_loss.size(), 1); d_Y_hat->ResizeLike(Y_hat); buff_.ResizeLike(Y); // Difference // d := y_hat - y math::Sub<float, CUDAContext>( Y.size(), Y_hat.data<float>(), Y.data<float>(), buff_.mutable_data<float>(), &context_); // Element-wise weighted difference (can be used to ignore or reweight // specific components) // d := alpha_in * (y_hat - y) math::Mul<float, CUDAContext>( buff_.size(), buff_.data<float>(), alpha_in.data<float>(), buff_.mutable_data<float>(), &context_); // d_Y_hat := d_avg_loss / N * MSE'(alpha_in * (y_hat - y)) hipLaunchKernelGGL(( MSEGradientKernel<float>) , dim3(CAFFE_GET_BLOCKS(buff_.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), buff_.size(), buff_.data<float>(), d_Y_hat->mutable_data<float>(), d_avg_loss.data<float>(), scale_ / N, beta_); // Element-wise scale by alpha_in and alpha_out math::Mul<float, CUDAContext>( d_Y_hat->size(), d_Y_hat->data<float>(), alpha_in.data<float>(), d_Y_hat->mutable_data<float>(), &context_); math::Mul<float, CUDAContext>( d_Y_hat->size(), d_Y_hat->data<float>(), alpha_out.data<float>(), d_Y_hat->mutable_data<float>(), &context_); return true; } REGISTER_CUDA_OPERATOR(MSELoss, MSELossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MSELossGradient, MSELossGradientOp<float, CUDAContext>); } // namespace caffe2
09cfa6f9d9681213d8b12f43f152e03461a47ca0.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "mse_loss_op.h" namespace caffe2 { namespace { template <typename T> __global__ void MSEKernel( const int n, const T* in, T* out, T beta) { // f(x) = x^2 / beta CUDA_1D_KERNEL_LOOP(index, n) { T val = in[index]; T abs_val = abs(val); out[index] = val * val / beta; } } template <typename T> __global__ void MSEGradientKernel( const int n, const T* in, T* out, const T* d_loss_data, T norm, T beta) { // f'(x) = 2 * x / beta // We also scale by norm * d_loss in this kernel for convenience CUDA_1D_KERNEL_LOOP(index, n) { T val = in[index]; T abs_val = abs(val); T d_loss = *d_loss_data; out[index] = 2 * norm * d_loss * val / beta; } } } // namespace template<> bool MSELossOp<float, CUDAContext>::RunOnDevice() { auto& Y_hat = Input(0); auto& Y = Input(1); auto& alpha_in = Input(2); auto& alpha_out = Input(3); auto* avg_loss = Output(0); int N = Y.dim32(0); // Require the same number of elements along axis 0 (batch size), but // otherwise don't care about the shape (just the number of elements) CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0), "Y_hat and Y must have the same number of elements along axis 0"); CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(), "Y_hat and Y must have the same number of elements"); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size()); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size()); avg_loss->Resize(vector<TIndex>()); buff_.ResizeLike(Y); // Difference // d := y_hat - y math::Sub<float, CUDAContext>( Y.size(), Y_hat.data<float>(), Y.data<float>(), buff_.mutable_data<float>(), &context_); // Element-wise weighted difference (can be used to ignore or reweight // specific components) // d := alpha_in * (y_hat - y) math::Mul<float, CUDAContext>( buff_.size(), buff_.data<float>(), alpha_in.data<float>(), buff_.mutable_data<float>(), &context_); // Element-wise MSE loss // l := MSE(alpha_in * (y_hat - y)) MSEKernel<float> <<<CAFFE_GET_BLOCKS(buff_.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( buff_.size(), buff_.data<float>(), buff_.mutable_data<float>(), beta_); // Element-wise weighted MSE loss (can be used to specify a per-element // loss weight) // l := alpha_out * MSE(alpha_in * (y_hat - y)) math::Mul<float, CUDAContext>( buff_.size(), buff_.data<float>(), alpha_out.data<float>(), buff_.mutable_data<float>(), &context_); // Sum of all losses // al := sum_i l_i float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( buff_.size(), buff_.data<float>(), avg_loss_data, &context_); // Average of input batch size // al := 1/N * al math::Scale<float, CUDAContext>( 1, scale_ / N, avg_loss_data, avg_loss_data, &context_); return true; } template<> bool MSELossGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y_hat = Input(0); auto& Y = Input(1); auto& alpha_in = Input(2); auto& alpha_out = Input(3); auto& d_avg_loss = Input(4); // gradient of net w.r.t. 
avg_loss ("gradOuput") auto* d_Y_hat = Output(0); // gradient of net w.r.t. Y_hat ("gradInput") // We intentially don't compute gradients for Y, alpha_{in,out} since they // are not needed (can change in the future if desired) int N = Y.dim32(0); // Require the same number of elements along axis 0 (batch size), but // otherwise don't care about the shape (just the number of elements) CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0), "Y_hat and Y must have the same number of elements along axis 0"); CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(), "Y_hat and Y must have the same number of elements"); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size()); CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size()); CAFFE_ENFORCE_EQ(d_avg_loss.size(), 1); d_Y_hat->ResizeLike(Y_hat); buff_.ResizeLike(Y); // Difference // d := y_hat - y math::Sub<float, CUDAContext>( Y.size(), Y_hat.data<float>(), Y.data<float>(), buff_.mutable_data<float>(), &context_); // Element-wise weighted difference (can be used to ignore or reweight // specific components) // d := alpha_in * (y_hat - y) math::Mul<float, CUDAContext>( buff_.size(), buff_.data<float>(), alpha_in.data<float>(), buff_.mutable_data<float>(), &context_); // d_Y_hat := d_avg_loss / N * MSE'(alpha_in * (y_hat - y)) MSEGradientKernel<float> <<<CAFFE_GET_BLOCKS(buff_.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( buff_.size(), buff_.data<float>(), d_Y_hat->mutable_data<float>(), d_avg_loss.data<float>(), scale_ / N, beta_); // Element-wise scale by alpha_in and alpha_out math::Mul<float, CUDAContext>( d_Y_hat->size(), d_Y_hat->data<float>(), alpha_in.data<float>(), d_Y_hat->mutable_data<float>(), &context_); math::Mul<float, CUDAContext>( d_Y_hat->size(), d_Y_hat->data<float>(), alpha_out.data<float>(), d_Y_hat->mutable_data<float>(), &context_); return true; } REGISTER_CUDA_OPERATOR(MSELoss, MSELossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MSELossGradient, MSELossGradientOp<float, CUDAContext>); } // namespace caffe2
b76979751061db69a676bb516247426f2d01c6ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "caffe2/operators/elementwise_op.h" #include "caffe2/operators/math_ops.h" namespace caffe2 { template <typename T> __global__ void SqrtKernel(const int N, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = sqrt(x[i]); } } struct SqrtCUDAFunctor { template <typename T> inline void operator()(const int n, const T* x, T* y, CUDAContext* device_context) { hipLaunchKernelGGL(( SqrtKernel<T>) , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, device_context->cuda_stream(), n, x, y); return; } }; REGISTER_CUDA_OPERATOR( Sqrt, UnaryElementwiseOp<TensorTypes<float>, CUDAContext, SqrtCUDAFunctor>); } // namespace caffe2
b76979751061db69a676bb516247426f2d01c6ff.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "caffe2/core/context_gpu.h" #include "caffe2/operators/elementwise_op.h" #include "caffe2/operators/math_ops.h" namespace caffe2 { template <typename T> __global__ void SqrtKernel(const int N, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, N) { y[i] = sqrt(x[i]); } } struct SqrtCUDAFunctor { template <typename T> inline void operator()(const int n, const T* x, T* y, CUDAContext* device_context) { SqrtKernel<T> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, device_context->cuda_stream()>>>(n, x, y); return; } }; REGISTER_CUDA_OPERATOR( Sqrt, UnaryElementwiseOp<TensorTypes<float>, CUDAContext, SqrtCUDAFunctor>); } // namespace caffe2
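// The CAFFE_GET_BLOCKS / CUDA_1D_KERNEL_LOOP pair used throughout these Caffe2 samples is a
// grid-stride loop, so a fixed-size grid can cover any N. The kernel below is a sketch of that
// pattern written out by hand for the same element-wise sqrt; the exact macro definitions live
// in caffe2/core/common_gpu.h and may differ in detail.
__global__ void sqrtGridStride(const int N, const float *x, float *y)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;   // first element owned by this thread
         i < N;
         i += blockDim.x * gridDim.x)                     // stride over the whole grid
    {
        y[i] = sqrtf(x[i]);
    }
}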
4556b6e756908b068ff03136d8dc6c10e8c24d1a.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Label Equivalence con ottimizzazioni introdotte da Kalentev (OLE stand for Optimized Label Equivalence or equivalently for Oleksander (Kalentev) Label Equivalence) #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { // Init phase. // Labels start at value 1, to differentiate them from background, that has value 0. __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = global_row * img.step + global_col; unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col; if (global_row < img.rows && global_col < img.cols) { labels[labels_index] = img[img_index] ? (labels_index + 1) : 0; } } __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned row, unsigned col, unsigned label, unsigned labels_index) { unsigned int min = label; if (row > 0) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]); if (col > 0) min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]); if (col < labels.cols - 1) min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]); } if (row < labels.rows - 1) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]); if (col > 0) min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]); if (col < labels.cols - 1) min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]); } if (col > 0) min = MinLabel(min, labels.data[labels_index - 1]); if (col < labels.cols - 1) min = MinLabel(min, labels.data[labels_index + 1]); return min; } // Scan phase. // The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Scan(cuda::PtrStepSzi labels, char *changes) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, row, col, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } // Analysis phase. // The pixel associated with current thread is given the minimum label of the neighbours. 
__global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } } class OLE : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; public: OLE() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); hipMalloc(&d_changes, sizeof(char)); grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); while (true) { changes = 0; hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes); hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } hipFree(d_changes); hipDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); hipMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); hipFree(d_changes); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); while (true) { changes = 0; hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes); hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(OLE);
4556b6e756908b068ff03136d8dc6c10e8c24d1a.cu
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Label Equivalence con ottimizzazioni introdotte da Kalentev (OLE stand for Optimized Label Equivalence or equivalently for Oleksander (Kalentev) Label Equivalence) #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { // Init phase. // Labels start at value 1, to differentiate them from background, that has value 0. __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = global_row * img.step + global_col; unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col; if (global_row < img.rows && global_col < img.cols) { labels[labels_index] = img[img_index] ? (labels_index + 1) : 0; } } __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned row, unsigned col, unsigned label, unsigned labels_index) { unsigned int min = label; if (row > 0) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]); if (col > 0) min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]); if (col < labels.cols - 1) min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]); } if (row < labels.rows - 1) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]); if (col > 0) min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]); if (col < labels.cols - 1) min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]); } if (col > 0) min = MinLabel(min, labels.data[labels_index - 1]); if (col < labels.cols - 1) min = MinLabel(min, labels.data[labels_index + 1]); return min; } // Scan phase. // The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Scan(cuda::PtrStepSzi labels, char *changes) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, row, col, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } // Analysis phase. // The pixel associated with current thread is given the minimum label of the neighbours. 
__global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } } class OLE : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; public: OLE() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } cudaFree(d_changes); cudaDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); cudaFree(d_changes); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_img_labels_); } cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(OLE);
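// In the labelling scheme above, a foreground pixel's provisional label is its own linear index
// plus 1 (0 marks background), so labels[label - 1] acts as the "parent" slot of that label and
// the Analyze kernel is plain pointer jumping to the root of the equivalence chain. The
// host-side rendering of that resolve step below is an illustration of the encoding over a flat
// label array, not code from the sample.
static unsigned resolveLabel(const unsigned *labels, unsigned label)
{
    // Follow parent links (stored at index label - 1) until a label points at itself.
    while (label != 0 && labels[label - 1] != label)
        label = labels[label - 1];
    return label;
}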
78b94eed51be46d4e942516cb7bc3c473c7b0e7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO \ 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char **argv); int rows, cols; int *data; int **wall; int *result; #define M_SEED 9 int pyramid_height; FILE *fp = fopen("result.txt", "w"); //#define BENCH_PRINT void init(int argc, char **argv) { if (argc == 4) { cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height = atoi(argv[3]); } else { fprintf(fp, "Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } data = new int[rows * cols]; wall = new int *[rows]; for (int n = 0; n < rows; n++) wall[n] = data + cols * n; result = new int[cols]; int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { fprintf(fp, "%d ", wall[i][j]); } fprintf(fp, "\n"); } #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max)) #define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x) #define MIN(a, b) ((a) <= (b) ? (a) : (b)) __global__ void dynproc_kernel(int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE - iteration * HALO * 2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols * bx - border; int blkXmax = blkX + BLOCK_SIZE - 1; // calculate the global thread coordination int xidx = blkX + tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - cols + 1) : BLOCK_SIZE - 1; int W = tx - 1; int E = tx + 1; bool isValid = IN_RANGE(tx, validXmin, validXmax); if (IN_RANGE(xidx, 0, cols - 1)) { prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i = 0; i < iteration; i++) { computed = false; if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && isValid) { computed = true; int left = prev[W]; int right = prev[E]; int up = prev[tx]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols * (startStep + i) + xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if (i == iteration - 1) break; if (computed) // Assign the computation range prev[tx] = result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed) { gpuResults[xidx] = result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows - 1; t += pyramid_height) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(pyramid_height, rows - t - 1), gpuWall, gpuResult[src], gpuResult[dst], cols, rows, t, borderCols); } return dst; } int main(int argc, char **argv) { int num_devices; hipGetDeviceCount(&num_devices); if (num_devices > 1) hipSetDevice(DEVICE); run(argc, argv); return EXIT_SUCCESS; } void run(int argc, char **argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO * 2; int blockCols = cols / smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1); fprintf(fp, "pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: " "%d\nblockGrid:[%d]\ntargetBlock:[%d]\n", pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows * cols; hipMalloc((void **)&gpuResult[0], sizeof(int) * cols); hipMalloc((void **)&gpuResult[1], sizeof(int) * cols); hipMemcpy(gpuResult[0], data, sizeof(int) * cols, hipMemcpyHostToDevice); hipMalloc((void **)&gpuWall, sizeof(int) * (size - cols)); hipMemcpy(gpuWall, data + cols, sizeof(int) * (size - cols), hipMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols); hipMemcpy(result, gpuResult[final_ret], sizeof(int) * cols, hipMemcpyDeviceToHost); #ifdef BENCH_PRINT for (int i = 0; i < cols; i++) fprintf(fp, "%d ", data[i]); fprintf(fp, "\n"); for (int i = 0; i < cols; i++) fprintf(fp, "%d ", result[i]); fprintf(fp, "\n"); #endif hipFree(gpuWall); hipFree(gpuResult[0]); hipFree(gpuResult[1]); delete[] data; delete[] wall; delete[] result; }
78b94eed51be46d4e942516cb7bc3c473c7b0e7d.cu
#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO \ 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char **argv); int rows, cols; int *data; int **wall; int *result; #define M_SEED 9 int pyramid_height; FILE *fp = fopen("result.txt", "w"); //#define BENCH_PRINT void init(int argc, char **argv) { if (argc == 4) { cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height = atoi(argv[3]); } else { fprintf(fp, "Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } data = new int[rows * cols]; wall = new int *[rows]; for (int n = 0; n < rows; n++) wall[n] = data + cols * n; result = new int[cols]; int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { fprintf(fp, "%d ", wall[i][j]); } fprintf(fp, "\n"); } #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max)) #define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x) #define MIN(a, b) ((a) <= (b) ? (a) : (b)) __global__ void dynproc_kernel(int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE - iteration * HALO * 2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols * bx - border; int blkXmax = blkX + BLOCK_SIZE - 1; // calculate the global thread coordination int xidx = blkX + tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - cols + 1) : BLOCK_SIZE - 1; int W = tx - 1; int E = tx + 1; bool isValid = IN_RANGE(tx, validXmin, validXmax); if (IN_RANGE(xidx, 0, cols - 1)) { prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i = 0; i < iteration; i++) { computed = false; if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && isValid) { computed = true; int left = prev[W]; int right = prev[E]; int up = prev[tx]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols * (startStep + i) + xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if (i == iteration - 1) break; if (computed) // Assign the computation range prev[tx] = result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed) { gpuResults[xidx] = result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows - 1; t += pyramid_height) { int temp = src; src = dst; dst = temp; dynproc_kernel<<<dimGrid, dimBlock>>>( MIN(pyramid_height, rows - t - 1), gpuWall, gpuResult[src], gpuResult[dst], cols, rows, t, borderCols); } return dst; } int main(int argc, char **argv) { int num_devices; cudaGetDeviceCount(&num_devices); if (num_devices > 1) cudaSetDevice(DEVICE); run(argc, argv); return EXIT_SUCCESS; } void run(int argc, char **argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO * 2; int blockCols = cols / smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1); fprintf(fp, "pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: " "%d\nblockGrid:[%d]\ntargetBlock:[%d]\n", pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows * cols; cudaMalloc((void **)&gpuResult[0], sizeof(int) * cols); cudaMalloc((void **)&gpuResult[1], sizeof(int) * cols); cudaMemcpy(gpuResult[0], data, sizeof(int) * cols, cudaMemcpyHostToDevice); cudaMalloc((void **)&gpuWall, sizeof(int) * (size - cols)); cudaMemcpy(gpuWall, data + cols, sizeof(int) * (size - cols), cudaMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols); cudaMemcpy(result, gpuResult[final_ret], sizeof(int) * cols, cudaMemcpyDeviceToHost); #ifdef BENCH_PRINT for (int i = 0; i < cols; i++) fprintf(fp, "%d ", data[i]); fprintf(fp, "\n"); for (int i = 0; i < cols; i++) fprintf(fp, "%d ", result[i]); fprintf(fp, "\n"); #endif cudaFree(gpuWall); cudaFree(gpuResult[0]); cudaFree(gpuResult[1]); delete[] data; delete[] wall; delete[] result; }
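// A plain CPU reference, not from the record above, for the recurrence that dynproc_kernel
// implements with its pyramid/halo blocking: each cell adds its own weight to the minimum of
// the three neighbouring cells in the previous row. It uses the sample's flat data/rows/cols
// layout (dst plays the role of the sample's result buffer); the function itself is mine and is
// only meant as a check of the GPU output.
static inline int iminRef(int a, int b) { return a <= b ? a : b; }

void calcPathCPU(const int *data, int rows, int cols, int *dst /* cols entries */)
{
    for (int j = 0; j < cols; j++)
        dst[j] = data[j];                            // row 0 seeds the running path costs

    for (int r = 1; r < rows; r++) {
        int prevLeft = dst[0];                       // old dst[j-1], saved before overwrite
        for (int j = 0; j < cols; j++) {
            int best = dst[j];
            if (j > 0)        best = iminRef(best, prevLeft);
            if (j < cols - 1) best = iminRef(best, dst[j + 1]);
            prevLeft = dst[j];                       // keep the old value for the next column
            dst[j] = data[r * cols + j] + best;
        }
    }
}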
8cdb80933164a9fb5d93e61f4b02676641b3369b.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) int SkipLayerNormPluginDynamic::initialize() TRT_NOEXCEPT { hipMalloc(&bias_gpu_, sizeof(float) * bias_size_); hipMemcpy(bias_gpu_, bias_.data(), bias_size_ * sizeof(float), hipMemcpyHostToDevice); hipMalloc(&scale_gpu_, sizeof(float) * scale_size_); hipMemcpy(scale_gpu_, scale_.data(), scale_size_ * sizeof(float), hipMemcpyHostToDevice); return 0; } void SkipLayerNormPluginDynamic::terminate() TRT_NOEXCEPT { if (bias_gpu_) { hipFree(bias_gpu_); bias_gpu_ = nullptr; } if (scale_gpu_) { hipFree(scale_gpu_); scale_gpu_ = nullptr; } } nvinfer1::DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputs[0]; } bool SkipLayerNormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { #ifdef TRT_PLUGIN_FP16_AVALIABLE return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); #else return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); #endif } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos == 1) { return in.type == prev.type && in.format == prev.format; } // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SkipLayerNormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The SkipLayerNorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int 
SkipLayerNormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; size_t num = ProductDim(input_dims); int hidden = input_dims.d[2]; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. SkipLayerNorm-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); const float *input2 = static_cast<const float *>(inputs[1]); float *output = static_cast<float *>(outputs[0]); operators::math::SkipLayerNormFunctor<float> skip_layer_norm_func; skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_, output, eps_, stream); } else if (input_type == nvinfer1::DataType::kHALF) { #ifdef TRT_PLUGIN_FP16_AVALIABLE VLOG(1) << "TRT Plugin DataType selected. SkipLayerNorm-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); const half *input2 = static_cast<const half *>(inputs[1]); half *output = static_cast<half *>(outputs[0]); operators::math::SkipLayerNormFunctor<half> skip_layer_norm_func; skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_, output, static_cast<half>(eps_), stream); #else PADDLE_THROW(platform::errors::Fatal( "The Ernie(Bert) tensorRT plugin should be " "complied with CUDA version >= 10.0 when running with fp16. " "Please recomplie it or try to use fp32 by set " "config.SetTRTDynamicShapeInfo(min_input_shape, " "max_input_shape, opt_input_shape, true")); #endif } else { PADDLE_THROW(platform::errors::Fatal( "The SkipLayerNorm TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
8cdb80933164a9fb5d93e61f4b02676641b3369b.cu
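// Descriptive note (not in the original source): original CUDA source of the fused skip (residual add) + LayerNorm TensorRT plugin; bias/scale are allocated and copied in initialize(), freed in terminate(), and enqueue() runs SkipLayerNormFunctor in fp32 or fp16.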
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_runtime.h> #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) int SkipLayerNormPluginDynamic::initialize() TRT_NOEXCEPT { cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_); cudaMemcpy(bias_gpu_, bias_.data(), bias_size_ * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_); cudaMemcpy(scale_gpu_, scale_.data(), scale_size_ * sizeof(float), cudaMemcpyHostToDevice); return 0; } void SkipLayerNormPluginDynamic::terminate() TRT_NOEXCEPT { if (bias_gpu_) { cudaFree(bias_gpu_); bias_gpu_ = nullptr; } if (scale_gpu_) { cudaFree(scale_gpu_); scale_gpu_ = nullptr; } } nvinfer1::DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputs[0]; } bool SkipLayerNormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { #ifdef TRT_PLUGIN_FP16_AVALIABLE return (in.type == nvinfer1::DataType::kFLOAT || in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::TensorFormat::kLINEAR); #else return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); #endif } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos == 1) { return in.type == prev.type && in.format == prev.format; } // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SkipLayerNormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The SkipLayerNorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SkipLayerNormPluginDynamic::enqueue( const 
nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto input_dims = input_desc[0].dims; size_t num = ProductDim(input_dims); int hidden = input_dims.d[2]; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. SkipLayerNorm-->fp32"; const float *input1 = static_cast<const float *>(inputs[0]); const float *input2 = static_cast<const float *>(inputs[1]); float *output = static_cast<float *>(outputs[0]); operators::math::SkipLayerNormFunctor<float> skip_layer_norm_func; skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_, output, eps_, stream); } else if (input_type == nvinfer1::DataType::kHALF) { #ifdef TRT_PLUGIN_FP16_AVALIABLE VLOG(1) << "TRT Plugin DataType selected. SkipLayerNorm-->fp16"; const half *input1 = static_cast<const half *>(inputs[0]); const half *input2 = static_cast<const half *>(inputs[1]); half *output = static_cast<half *>(outputs[0]); operators::math::SkipLayerNormFunctor<half> skip_layer_norm_func; skip_layer_norm_func(num, hidden, input1, input2, scale_gpu_, bias_gpu_, output, static_cast<half>(eps_), stream); #else PADDLE_THROW(platform::errors::Fatal( "The Ernie(Bert) tensorRT plugin should be " "complied with CUDA version >= 10.0 when running with fp16. " "Please recomplie it or try to use fp32 by set " "config.SetTRTDynamicShapeInfo(min_input_shape, " "max_input_shape, opt_input_shape, true")); #endif } else { PADDLE_THROW(platform::errors::Fatal( "The SkipLayerNorm TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
43eaea7bc3aa01112b6688f204b57625c7272e84.hip
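// Descriptive note (not in the original source): Orio-generated AXPY benchmark (HIP port). The kernel computes y[i] += a1*x1[i] + a2*x2[i], hand-unrolled by UF = 8 so each thread updates eight elements spaced n/UF apart; the single launch is timed with hipEvent timers and checked by printing a few entries of y.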
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "book.h" #define UF 8 __global__ void orcu_kernel6(int n, int orcu_var3, double a1, double a2, double* y, double* x1, double* x2) { int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var3; int k=(n/UF); if (tid<=orcu_var3+k-1) { { y[tid]=y[tid]+a1*x1[tid]+a2*x2[tid]; int index = tid+k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+2*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+3*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+4*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+5*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+6*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+7*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; } } } //__global__ void orcu_kernel11(int n, int orcu_var8, double a1, double* y, double* x1) { //int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var8; //if (tid<=n-1) { //y[tid]=y[tid]+a1*x1[tid]; //} //} void axpy1(int n, double *y, double a1, double a2, double *x1, double *x2) { register int i; /*@ begin Loop( transform Composite( cuda = (16,False, False, 1) ,scalarreplace = (False, 'int') , unrolljam = (['i'], [2]) ) { for (i=0; i<=n-1; i++) { y[i]=y[i]+a1*x1[i]; } } ) @*/ hipEvent_t start, stop; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); { { int orio_lbound1=0; //{ /*declare variables*/ double *dev_y, *dev_x1, *dev_x2; int nthreads=TC; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=(n+nthreads-1)/nthreads; dimGrid.x=(dimGrid.x+UF-1)/UF; printf("num of blocks: %d\n", dimGrid.x); /*allocate device memory*/ int nbytes=n*sizeof(double); hipMalloc((void**)&dev_y,nbytes); hipMalloc((void**)&dev_x1,nbytes); hipMalloc((void**)&dev_x2,nbytes); /*copy data from host to device*/ hipMemcpy(dev_y,y,nbytes,hipMemcpyHostToDevice); hipMemcpy(dev_x1,x1,nbytes,hipMemcpyHostToDevice); hipMemcpy(dev_x2,x2,nbytes,hipMemcpyHostToDevice); /*invoke device kernel*/ int orcu_var3=orio_lbound1; HANDLE_ERROR(hipEventRecord(start, 0)); hipLaunchKernelGGL(( orcu_kernel6), dim3(dimGrid),dim3(dimBlock), 0, 0, n,orcu_var3,a1,a2,dev_y,dev_x1,dev_x2); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); /*copy data from device to host*/ HANDLE_ERROR(hipMemcpy(y,dev_y,nbytes,hipMemcpyDeviceToHost)); /*free allocated memory*/ hipFree(dev_y); hipFree(dev_x1); hipFree(dev_x2); //} //int orio_lbound2=n-((n-(0))%UF); { /*declare variables*/ //double *dev_y, *dev_x1; //int nthreads=TC; /*calculate device dimensions*/ //dim3 dimGrid, dimBlock; //dimBlock.x=nthreads; //dimGrid.x=(n+nthreads-1)/nthreads; /*allocate device memory*/ //int nbytes=n*sizeof(double); //hipMalloc((void**)&dev_y,nbytes); //hipMalloc((void**)&dev_x1,nbytes); /*copy data from host to device*/ //hipMemcpy(dev_y,y,nbytes,hipMemcpyHostToDevice); //hipMemcpy(dev_x1,x1,nbytes,hipMemcpyHostToDevice); /*invoke device kernel*/ //int orcu_var8=orio_lbound2; //orcu_kernel11<<<dimGrid,dimBlock>>>(n,orcu_var8,a1,dev_y,dev_x1); /*copy data from device to host*/ //hipMemcpy(y,dev_y,nbytes,hipMemcpyDeviceToHost); /*free allocated memory*/ //hipFree(dev_y); //hipFree(dev_x1); } } } /*@ end @*/ float passedTime; HANDLE_ERROR(hipEventElapsedTime(&passedTime, start, stop)); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); printf("timePassed: %f ms\n", passedTime); } int main(){ double* y = (double*) malloc(sizeof(double)*NN); double* x1 = (double*) 
malloc(sizeof(double)*NN); double* x2 = (double*) malloc(sizeof(double)*NN); double a1 = AA; double a2 = AA2; int i; for(i=0; i<NN; i++){ y[i] = i; x1[i] = i; x2[i] = i; } axpy1(NN, y, a1, a2, x1, x2); for(i=0; i<13; i++) printf("%f\n", y[i]); for(i=NN-9; i<NN; i++) printf("%f\n", y[i]); return 0; }
43eaea7bc3aa01112b6688f204b57625c7272e84.cu
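// Descriptive note (not in the original source): original CUDA version of the Orio-generated AXPY benchmark, with the same UF = 8 unrolling and n/UF stride, timed with cudaEvent timers.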
#include "book.h" #define UF 8 __global__ void orcu_kernel6(int n, int orcu_var3, double a1, double a2, double* y, double* x1, double* x2) { int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var3; int k=(n/UF); if (tid<=orcu_var3+k-1) { { y[tid]=y[tid]+a1*x1[tid]+a2*x2[tid]; int index = tid+k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+2*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+3*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+4*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+5*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+6*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; index = tid+7*k; y[index]=y[index]+a1*x1[index]+a2*x2[index]; } } } //__global__ void orcu_kernel11(int n, int orcu_var8, double a1, double* y, double* x1) { //int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var8; //if (tid<=n-1) { //y[tid]=y[tid]+a1*x1[tid]; //} //} void axpy1(int n, double *y, double a1, double a2, double *x1, double *x2) { register int i; /*@ begin Loop( transform Composite( cuda = (16,False, False, 1) ,scalarreplace = (False, 'int') , unrolljam = (['i'], [2]) ) { for (i=0; i<=n-1; i++) { y[i]=y[i]+a1*x1[i]; } } ) @*/ cudaEvent_t start, stop; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); { { int orio_lbound1=0; //{ /*declare variables*/ double *dev_y, *dev_x1, *dev_x2; int nthreads=TC; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=(n+nthreads-1)/nthreads; dimGrid.x=(dimGrid.x+UF-1)/UF; printf("num of blocks: %d\n", dimGrid.x); /*allocate device memory*/ int nbytes=n*sizeof(double); cudaMalloc((void**)&dev_y,nbytes); cudaMalloc((void**)&dev_x1,nbytes); cudaMalloc((void**)&dev_x2,nbytes); /*copy data from host to device*/ cudaMemcpy(dev_y,y,nbytes,cudaMemcpyHostToDevice); cudaMemcpy(dev_x1,x1,nbytes,cudaMemcpyHostToDevice); cudaMemcpy(dev_x2,x2,nbytes,cudaMemcpyHostToDevice); /*invoke device kernel*/ int orcu_var3=orio_lbound1; HANDLE_ERROR(cudaEventRecord(start, 0)); orcu_kernel6<<<dimGrid,dimBlock>>>(n,orcu_var3,a1,a2,dev_y,dev_x1,dev_x2); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); /*copy data from device to host*/ HANDLE_ERROR(cudaMemcpy(y,dev_y,nbytes,cudaMemcpyDeviceToHost)); /*free allocated memory*/ cudaFree(dev_y); cudaFree(dev_x1); cudaFree(dev_x2); //} //int orio_lbound2=n-((n-(0))%UF); { /*declare variables*/ //double *dev_y, *dev_x1; //int nthreads=TC; /*calculate device dimensions*/ //dim3 dimGrid, dimBlock; //dimBlock.x=nthreads; //dimGrid.x=(n+nthreads-1)/nthreads; /*allocate device memory*/ //int nbytes=n*sizeof(double); //cudaMalloc((void**)&dev_y,nbytes); //cudaMalloc((void**)&dev_x1,nbytes); /*copy data from host to device*/ //cudaMemcpy(dev_y,y,nbytes,cudaMemcpyHostToDevice); //cudaMemcpy(dev_x1,x1,nbytes,cudaMemcpyHostToDevice); /*invoke device kernel*/ //int orcu_var8=orio_lbound2; //orcu_kernel11<<<dimGrid,dimBlock>>>(n,orcu_var8,a1,dev_y,dev_x1); /*copy data from device to host*/ //cudaMemcpy(y,dev_y,nbytes,cudaMemcpyDeviceToHost); /*free allocated memory*/ //cudaFree(dev_y); //cudaFree(dev_x1); } } } /*@ end @*/ float passedTime; HANDLE_ERROR(cudaEventElapsedTime(&passedTime, start, stop)); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); printf("timePassed: %f ms\n", passedTime); } int main(){ double* y = (double*) malloc(sizeof(double)*NN); double* x1 = (double*) malloc(sizeof(double)*NN); double* x2 = (double*) malloc(sizeof(double)*NN); double a1 = AA; double a2 = AA2; int 
i; for(i=0; i<NN; i++){ y[i] = i; x1[i] = i; x2[i] = i; } axpy1(NN, y, a1, a2, x1, x2); for(i=0; i<13; i++) printf("%f\n", y[i]); for(i=NN-9; i<NN; i++) printf("%f\n", y[i]); return 0; }
9843af0b118079002aab91e2fa67357b46946a7d.hip
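// Descriptive note (not in the original source): NeuroGPU simulation kernel (HIP port). Each block integrates one neuron/parameter set, with threadIdx.y indexing the stimulus; every time step evaluates the ion-channel models per segment, assembles and solves the branched (Hines-style) linear system via BeforeLU/BkSub, and stages recorded voltages in shared memory before flushing them to VHotGlobal.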
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CudaStuff.cuh" //#include "AllModels.cu" #include "AllModels_hip.cuh" #define ILP16 __constant__ MYFTYPE cCm[NSEG]; __constant__ MYSECONDFTYPE cE[NSEG]; __constant__ MYSECONDFTYPE cF[NSEG]; __constant__ MYDTYPE cFIdxs[NSEG*LOG_N_DEPTH]; __constant__ MYDTYPE cKs[NSEG]; __constant__ MYDTYPE cSegToComp[NSEG]; __constant__ MYDTYPE cBoolModel[NSEG * N_MODELS];//One day change this to bool __constant__ MYDTYPE cRelStarts[N_FATHERS];//nFathers __constant__ MYDTYPE cRelEnds[N_FATHERS];//nFathers __constant__ MYDTYPE cFathers[N_FATHERS];//nFathers __constant__ MYDTYPE cRelVec[N_CALL_FOR_FATHER];//nCallForFather __constant__ MYDTYPE cSegStartI[N_CALL_FOR_FATHER + 1];//nCallForFather __constant__ MYDTYPE cSegEndI[N_CALL_FOR_FATHER + 1];//nCallForFather __constant__ MYDTYPE cCompByLevel32[COMP_DEPTH*WARPSIZE];//CompDepth __constant__ MYDTYPE cCompByFLevel32[COMP_DEPTH*WARPSIZE];//CompFDepth __constant__ MYDTYPE cLRelStarts[N_L_REL];//nLRel __constant__ MYDTYPE cLRelEnds[N_L_REL];//nLRel __constant__ MYDTYPE cFLRelStarts[N_F_L_REL];//nFLRel __constant__ MYDTYPE cFLRelEnds[N_F_L_REL];//nFLRel __constant__ MYDTYPE cSonNoVec[NSEG];//InMat.N #ifdef __INTELLISENSE__ void __syncthreads(); #endif __device__ void BeforeLU(HMat InMat, MYSECONDFTYPE* uHP, MYSECONDFTYPE* bHP, MYDTYPE Depth) { MYDTYPE PIdx = threadIdx.x; MYDTYPE i, j, CurJ, CurB, t, CurLevel, LRelIndex; MYDTYPE JumctionI; LRelIndex = cLRelStarts[CurLevel]; LRelIndex = LRelIndex + cLRelEnds[CurLevel]; for (CurLevel = 0; CurLevel <= Depth; CurLevel++) { for (LRelIndex = cLRelStarts[CurLevel]; LRelIndex <= cLRelEnds[CurLevel]; LRelIndex++) { //for(LRelIndex=cLRelStarts[CurLevel];LRelIndex<=InMat.LRelEnds[CurLevel];LRelIndex++){ JumctionI = cCompByLevel32[LRelIndex*WARPSIZE + PIdx] - 1; for (i = cSegStartI[JumctionI] - 1; i<cSegEndI[JumctionI]; i++) { MYSECONDFTYPE uHPm1 = uHP[i - 1]; uHP[i] = uHP[i] - cF[i - 1] * (cE[i - 1] / uHPm1); // So far same as paper parallel uHPm1 = uHP[i - 1]; MYSECONDFTYPE bHPm1 = bHP[i - 1]; bHP[i] = bHP[i] - bHPm1*cE[i - 1] / uHPm1; // bH is y } } if (CurLevel<Depth) { for (LRelIndex = cFLRelStarts[CurLevel]; LRelIndex <= cFLRelEnds[CurLevel]; LRelIndex++) { CurB = cCompByFLevel32[(LRelIndex)*WARPSIZE + PIdx] - 1;//RB i inserted another -1 into the index RB 2 i removed the-1 from the curlevel CurJ = cFathers[CurB] - 1; MYDTYPE St = cRelStarts[CurB]; MYDTYPE En = cRelEnds[CurB]; for (j = St; j <= En; j++) { t = cRelVec[j - 1] - 1; MYSECONDFTYPE uHPm1 = uHP[t - 1]; uHP[CurJ] -= cF[t - 1] * (cE[t - 1] / uHPm1); uHPm1 = uHP[t - 1]; MYSECONDFTYPE bHPm1 = bHP[t - 1]; bHP[CurJ] -= bHPm1*cE[t - 1] / uHPm1; } } } } } #ifdef BKSUB1 __device__ void BkSub(HMat InMat, MYSECONDFTYPE* PX, MYSECONDFTYPE* PF, MYSECONDFTYPE* uHP, MYSECONDFTYPE* bHP, MYDTYPE LognDepth) { // MYDTYPE PIdx_1=threadIdx.x; // MYDTYPE NextID_1; MYDTYPE PIdx[NILP + 1]; MYDTYPE NextID[NILP + 1]; for (int count = 1; count < NILP + 1; count++) { PIdx[count] = threadIdx.x + (WARPSIZE*(count - 1)); //this is from a different superilp PX[PIdx[count]] = PX[PIdx[count]] / PF[PIdx[count]]; PF[PIdx[count]] = -cF[PIdx[count]] / PF[PIdx[count]]; } MYFTYPE OldPXj[NILP + 1]; MYFTYPE OldPXNextID[NILP + 1]; MYFTYPE OldPFj[NILP + 1]; MYFTYPE OldPFNextID[NILP + 1]; MYDTYPE i; PX[InMat.N] = 0; PF[InMat.N] = 1; for (i = 0; i<LognDepth; i++) { for (int count = 1; count < NILP + 1; count++) { NextID[count] = cFIdxs[i*InMat.N + PIdx[count]] - 1; OldPXj[count] = PX[PIdx[count]]; 
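// Pointer-jumping combine: NextID is taken from cFIdxs for this pass, and the partial solution/factor stored at that index is folded into this thread's PX/PF entries below.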
OldPXNextID[count] = PX[NextID[count]]; PX[PIdx[count]] = OldPXj[count] + OldPXNextID[count] * PF[PIdx[count]]; } for (int count = 1; count < NILP + 1; count++) { OldPFj[count] = PF[PIdx[count]]; OldPFNextID[count] = PF[NextID[count]]; PF[PIdx[count]] = OldPFj[count] * OldPFNextID[count]; } } } #endif #ifdef BKSUB2 __device__ void BkSub(HMat InMat, MYSECONDFTYPE* uHP, MYSECONDFTYPE* bHP, MYSECONDFTYPE* Out, MYDTYPE Depth) { // MYDTYPE PIdx_3=threadIdx.x+(WARPSIZE*2); Out[PIdx_2]=0; // might be useless? #define THISCOMMANDHEREB1(VARILP) MYDTYPE PIdx_ ## VARILP =threadIdx.x+(WARPSIZE*( ## VARILP -1)); Out[PIdx_ ## VARILP ]=0; MYDTYPE PIdx[1] = threadIdx.x + (WARPSIZE*([1] - 1)); Out[PIdx[1]] = 0; MYDTYPE j, CurJ, CurB, t; MYDTYPE JumctionI; short CurLevel, i; // get KsB from MATLAB (this comes instead of FIdxsX) // KsB=Ks; // do in matlab // bPX=zeros(1,N); // might be useless // for CurLevel=Depth:-1:0 MYDTYPE LRelIndex, k; MYFTYPE temp; for (CurLevel = Depth; CurLevel >= 0; CurLevel--) { // Run all independent set for this level, in parallel // for JumctionI=find(Level==CurLevel) % in parallel for (LRelIndex = cLRelStarts[CurLevel]; LRelIndex <= cLRelEnds[CurLevel]; LRelIndex++) { JumctionI = cCompByLevel32[LRelIndex*WARPSIZE + PIdx_1] - 1; // for i=(cSegEndI(JumctionI)):-1:(cSegStartI(JumctionI)-1) for (i = cSegEndI[JumctionI] - 1; i >= (cSegStartI[JumctionI] - 2); i--) { // k=cKsB(i+1); k = cKsB[i + 1]; // bPX(i)=(bH(i)-bPX(k)*f(i))/uH(i); Out[i] = (bHP[i] - Out[k] * cF[i]) / uHP[i]; } } } } #endif __device__ void runSimulation(HMat InMat, const MYFTYPE* __restrict__ ParamsM, MYFTYPE* ModelStates, MYFTYPE* V, Stim stim, Sim sim, MYFTYPE* VHotGlobal) { __shared__ MYSECONDFTYPE uHP_all[(NSEG + 2)*NTRACES]; __shared__ MYSECONDFTYPE bHP_all[(NSEG + 2)*NTRACES]; __shared__ MYFTYPE SMemVHot_all[WARPSIZE*NTRACES]; MYSECONDFTYPE *uHP = &uHP_all[(NSEG + 2)*threadIdx.y]; MYSECONDFTYPE *bHP = &bHP_all[(NSEG + 2)*threadIdx.y]; MYFTYPE *SMemVHot = &SMemVHot_all[(WARPSIZE)*threadIdx.y]; MYDTYPE StimID = threadIdx.y; //MYDTYPE PerStimulus; //PerStimulus = InMat.N+2; MYDTYPE NeuronID = blockIdx.x; int Nt = stim.Nt; MYFTYPE t = 0; MYSECONDFTYPE *PX, *PF; PX = bHP; PF = uHP; MYDTYPE PIdx[NILP + 1]; for (int count = 1; count < NILP + 1; count++) { PIdx[count] = threadIdx.x + (WARPSIZE*(count - 1)); } int perBlockStatesSize = (NSEG)*(NSTATES + 1); #define state_macro(stateind,segmentInd) ModelStates[NeuronID*perBlockStatesSize + stateind*NSEG+PIdx[segmentInd]]//Is this coalesced? 
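// state_macro addresses this neuron's block of ModelStates: stride perBlockStatesSize per neuron, NSEG per state variable, with PIdx[segmentInd] giving the segment index handled by this thread for that ILP slot.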
MYFTYPE Vmid[NILP + 1]; MYFTYPE v[NILP + 1]; MYFTYPE dv[NILP + 1]; MYSECONDFTYPE sumCurrents[NILP + 1]; MYSECONDFTYPE sumCurrentsDv[NILP + 1]; MYFTYPE sumConductivity[NILP + 1]; MYFTYPE sumConductivityDv[NILP + 1]; MYDTYPE parentIndex[NILP + 1]; MYDTYPE Eidx[NILP + 1]; MYSECONDFTYPE rhs[NILP + 1]; MYSECONDFTYPE D[NILP + 1]; MYFTYPE gModel[NILP + 1]; MYFTYPE cai[NILP + 1]; MYFTYPE ica[NILP + 1]; MYFTYPE eca[NILP + 1]; MYFTYPE StimCurrent[NILP + 1]; for (int count = 1; count < NILP + 1; count++) { v[count] = V[PIdx[count]]; sumCurrents[count] = 0; sumCurrentsDv[count] = 0; sumConductivity[count] = 0; sumConductivityDv[count] = 0; bHP[count] = 0; dv[count] = 0; Eidx[count] = InMat.N - PIdx[count] - 1; parentIndex[count] = InMat.N - cKs[InMat.N - PIdx[count]]; cai[count] = 0; ica[count] = 0; eca[count] = 0; if (PIdx[count] == 0) { parentIndex[count] = 0; }; for (int count1 = 0; count1 < NSTATES; count1++) { state_macro(count1, count) = 0; } } if (Eidx[1]>InMat.N - 1) { Eidx[1] = InMat.N - 1; } MYDTYPE perThreadParamMSize = InMat.NComps*NPARAMS; #define param_macro(paramInd,segmentInd) ParamsM[NeuronID*perThreadParamMSize + paramInd*InMat.NComps+cSegToComp[segmentInd] ] #ifdef NKIN_STATES MYDTYPE perThreadStateMSize = InMat.NComps*NKIN_STATES; #define init_state_macro(stateInd,segmentInd) InitStatesM[NeuronID*perThreadStateMSize + stateInd*InMat.NComps+cSegToComp[segmentInd] ] ; SUPERILPMACRO(SET_KINETIC_STATE) #endif for (int count = 1; count < NILP + 1; count++) { if(cBoolModel[PIdx[count] +0*NSEG]){CuInitModel_Ca_HVA(v[count],state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuInitModel_Ca_LVAst(v[count],state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuInitModel_CaDynamics_E2(v[count],cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , ica[count] ,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuInitModel_Ih(v[count],state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuInitModel_Im(v[count],state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuInitModel_K_Pst(v[count],state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuInitModel_K_Tst(v[count],state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuInitModel_Nap_Et2(v[count],state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuInitModel_NaTa_t(v[count],state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuInitModel_NaTs2_t(v[count],state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){CuInitModel_pas(v[count],param_macro(14, PIdx[count]) ,param_macro(15, PIdx[count]) );}if(cBoolModel[PIdx[count] +11*NSEG]){CuInitModel_SK_E2(v[count],state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuInitModel_SKv3_1(v[count],state_macro(18,count) ,param_macro(18, PIdx[count]) );} } MYDTYPE stimLoc = stim.loc; MYFTYPE stimArea = stim.area; MYDTYPE dtCounter = 0; MYFTYPE dt = sim.dt; 
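// Main integration loop: each step evaluates the channel currents at v and at v + 0.001 to linearize the conductances, builds the right-hand side (bHP) and diagonal (uHP) of the branched system, solves it with BeforeLU + BkSub, and then advances the channel state variables with the CuDerivModel_* routines.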
MYFTYPE temp; for (int i = 0; i<Nt; i++) { #ifdef STIMFROMCSV dt = stim.durs[i]; #endif STIMFROMCSV t += 0.5*dt; if ((i % (WARPSIZE) == 0)) { if (i>0) { for (int recInd = 0; recInd<sim.NRecSites; recInd++) { VHotGlobal[NeuronID*(sim.NRecSites*Nt*blockDim.y) + threadIdx.y*Nt*sim.NRecSites + recInd*Nt + (i - WARPSIZE) + PIdx[1]] = SMemVHot[WARPSIZE*recInd + PIdx[1]]; } } //amps[PIdx[1]] = stim.amps[threadIdx.y*Nt + i + PIdx[1]]; } for (int recInd = 0; recInd<sim.NRecSites; recInd++) { if (sim.RecSites[recInd] % WARPSIZE == threadIdx.x) //This is done by all threads why??? SMemVHot[recInd*WARPSIZE + i % (WARPSIZE)] = v[1];//This is going to be challenging to make it general but possible. } for (int count = 1; count < NILP + 1; count++) { rhs[count] = 0; D[count] = 0; sumCurrents[count] = 0; sumConductivity[count] = 0; sumCurrentsDv[count] = 0; sumConductivityDv[count] = 0; StimCurrent[count] = 0; ica[count] = 0;//SERIOUSLY??? check if this is correct does not seem right!!!! the whole point of ica is not to be initialized every time step... if (PIdx[count] == stimLoc) { StimCurrent[count] = 100 * stim.amps[threadIdx.y*Nt + i] / stimArea; } } for (int count = 1; count < NILP + 1; count++) { if(cBoolModel[PIdx[count] +0*NSEG]){CuBreakpointModel_Ca_HVA(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , temp,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuBreakpointModel_Ca_LVAst(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , temp,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuBreakpointModel_CaDynamics_E2(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , temp,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuBreakpointModel_Ih(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuBreakpointModel_Im(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuBreakpointModel_K_Pst(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuBreakpointModel_K_Tst(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuBreakpointModel_Nap_Et2(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuBreakpointModel_NaTa_t(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuBreakpointModel_NaTs2_t(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){CuBreakpointModel_pas(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,param_macro(14, PIdx[count]) ,param_macro(15, PIdx[count]) 
);}if(cBoolModel[PIdx[count] +11*NSEG]){CuBreakpointModel_SK_E2(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuBreakpointModel_SKv3_1(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(18,count) ,param_macro(18, PIdx[count]) );} if(cBoolModel[PIdx[count] +0*NSEG]){CuBreakpointModel_Ca_HVA(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuBreakpointModel_Ca_LVAst(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuBreakpointModel_CaDynamics_E2(sumCurrents[count] , sumConductivity[count],v[count] ,cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , ica[count] ,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuBreakpointModel_Ih(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuBreakpointModel_Im(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuBreakpointModel_K_Pst(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuBreakpointModel_K_Tst(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuBreakpointModel_Nap_Et2(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuBreakpointModel_NaTa_t(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuBreakpointModel_NaTs2_t(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){CuBreakpointModel_pas(sumCurrents[count] , sumConductivity[count],v[count] ,param_macro(14, PIdx[count]) ,param_macro(15, PIdx[count]) );}if(cBoolModel[PIdx[count] +11*NSEG]){CuBreakpointModel_SK_E2(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuBreakpointModel_SKv3_1(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(18,count) ,param_macro(18, PIdx[count]) );} } for (int count = 1; count < NILP + 1; count++) { gModel[count] = (sumCurrentsDv[count] - sumCurrents[count]) / EPS_V; rhs[count] = StimCurrent[count] - sumCurrents[count]; D[count] = gModel[count] + cCm[PIdx[count]] / (dt * 1000); D[count] -= cF[InMat.N - PIdx[count] - 1]; dv[count] += bHP[InMat.N - parentIndex[count] - 1] - bHP[InMat.N - PIdx[count] - 1]; } for (int count = 1; count < NILP + 1; count++) { rhs[count] -= cF[InMat.N - PIdx[count] - 1] * dv[count]; bHP[InMat.N - PIdx[count] - 1] = 
rhs[count]; uHP[InMat.N - PIdx[count] - 1] = D[count]; } //__syncthreads(); for (int count = 1; count < NILP + 1; count++) { if (cSonNoVec[PIdx[count]] == 1) { bHP[InMat.N - parentIndex[count] - 1] += cE[Eidx[count]] * dv[count]; uHP[InMat.N - parentIndex[count] - 1] -= cE[Eidx[count]]; }//WILL THIS WORK??? if (cSonNoVec[PIdx[count]] == 2) { bHP[InMat.N - parentIndex[count] - 1] += cE[Eidx[count]] * dv[count]; uHP[InMat.N - parentIndex[count] - 1] -= cE[Eidx[count]]; } } //__syncthreads(); BeforeLU(InMat, uHP, bHP, InMat.Depth); #ifdef BKSUB1 //__syncthreads(); BkSub(InMat, PX, PF, uHP, bHP, InMat.LognDepth); //__syncthreads(); for (int count = 1; count < NILP + 1; count++) { Vmid[count] = PX[InMat.N - PIdx[count] - 1]; v[count] += Vmid[count]; } #endif #ifdef BKSUB2 #define THISCOMMANDHERE37(VARILP) MYFTYPE vTemp_ ## VARILP=Vs[PIdx_ ## VARILP]; MYFTYPE vTemp[1] = Vs[PIdx[1]]; __syncthreads(); BkSub(InMat, uHP, bHP, Vs, InMat.Depth); __syncthreads(); PX = Vs; #define THISCOMMANDHERE38(VARILP) Vmid_ ## VARILP =PX[InMat.N-PIdx_ ## VARILP -1]; v_ ## VARILP +=Vmid_ ## VARILP ; Vmid[1] = PX[InMat.N - PIdx[1] - 1]; v[1] += Vmid[1]; __syncthreads(); #define THISCOMMANDHERE39(VARILP) Vs[PIdx_ ## VARILP ]= vTemp_ ## VARILP +Vmid_ ## VARILP ; Vs[PIdx[1]] = vTemp[1] + Vmid[1]; #endif t+=0.5*dt; // if(InMat.boolModel[PIdx_1 +0*InMat.N]){CuDerivModel_ca(dt, v_1,ModelStates_1[0],ModelStates_1[1],p0_1 ,p1_1 ,ModelStates_1[8],ModelStates_1[9]);} if(InMat.boolModel[PIdx_1 +1*InMat.N]){CuDerivModel_cad(dt, v_1,ModelStates_1[2],ModelStates_1[9],ModelStates_1[8]);} if(InMat.boolModel[PIdx_1 +2*InMat.N]){CuDerivModel_kca(dt, v_1,ModelStates_1[3],p2_1 ,p3_1 ,p4_1 ,p5_1 ,ModelStates_1[8]);} if(InMat.boolModel[PIdx_1 +3*InMat.N]){CuDerivModel_km(dt, v_1,ModelStates_1[4],p6_1 ,p7_1 ,p8_1 ,p9_1 ,p10_1 );} if(InMat.boolModel[PIdx_1 +4*InMat.N]){CuDerivModel_kv(dt, v_1,ModelStates_1[5],p11_1 ,p12_1 ,p13_1 ,p14_1 ,p15_1 );} if(InMat.boolModel[PIdx_1 +5*InMat.N]){CuDerivModel_na(dt, v_1,ModelStates_1[6],ModelStates_1[7],p16_1 ,p17_1 ,p18_1 ,p19_1 ,p20_1 ,p21_1 ,p22_1 ,p23_1 ,p24_1 ,p25_1 ,p26_1 ,p27_1 );} if(InMat.boolModel[PIdx_2 +0*InMat.N]){CuDerivModel_ca(dt, v_2,ModelStates_2[0],ModelStates_2[1],p0_2 ,p1_2 ,ModelStates_2[8],ModelStates_2[9]);} if(InMat.boolModel[PIdx_2 +1*InMat.N]){CuDerivModel_cad(dt, v_2,ModelStates_2[2],ModelStates_2[9],ModelStates_2[8]);} if(InMat.boolModel[PIdx_2 +2*InMat.N]){CuDerivModel_kca(dt, v_2,ModelStates_2[3],p2_2 ,p3_2 ,p4_2 ,p5_2 ,ModelStates_2[8]);} if(InMat.boolModel[PIdx_2 +3*InMat.N]){CuDerivModel_km(dt, v_2,ModelStates_2[4],p6_2 ,p7_2 ,p8_2 ,p9_2 ,p10_2 );} if(InMat.boolModel[PIdx_2 +4*InMat.N]){CuDerivModel_kv(dt, v_2,ModelStates_2[5],p11_2 ,p12_2 ,p13_2 ,p14_2 ,p15_2 );} if(InMat.boolModel[PIdx_2 +5*InMat.N]){CuDerivModel_na(dt, v_2,ModelStates_2[6],ModelStates_2[7],p16_2 ,p17_2 ,p18_2 ,p19_2 ,p20_2 ,p21_2 ,p22_2 ,p23_2 ,p24_2 ,p25_2 ,p26_2 ,p27_2 );} if(InMat.boolModel[PIdx_3 +0*InMat.N]){CuDerivModel_ca(dt, v_3,ModelStates_3[0],ModelStates_3[1],p0_3 ,p1_3 ,ModelStates_3[8],ModelStates_3[9]);} if(InMat.boolModel[PIdx_3 +1*InMat.N]){CuDerivModel_cad(dt, v_3,ModelStates_3[2],ModelStates_3[9],ModelStates_3[8]);} if(InMat.boolModel[PIdx_3 +2*InMat.N]){CuDerivModel_kca(dt, v_3,ModelStates_3[3],p2_3 ,p3_3 ,p4_3 ,p5_3 ,ModelStates_3[8]);} if(InMat.boolModel[PIdx_3 +3*InMat.N]){CuDerivModel_km(dt, v_3,ModelStates_3[4],p6_3 ,p7_3 ,p8_3 ,p9_3 ,p10_3 );} if(InMat.boolModel[PIdx_3 +4*InMat.N]){CuDerivModel_kv(dt, v_3,ModelStates_3[5],p11_3 ,p12_3 ,p13_3 ,p14_3 ,p15_3 );} 
if(InMat.boolModel[PIdx_3 +5*InMat.N]){CuDerivModel_na(dt, v_3,ModelStates_3[6],ModelStates_3[7],p16_3 ,p17_3 ,p18_3 ,p19_3 ,p20_3 ,p21_3 ,p22_3 ,p23_3 ,p24_3 ,p25_3 ,p26_3 ,p27_3 );} for (int count = 1; count < NILP + 1; count++) { if(cBoolModel[PIdx[count] +0*NSEG]){CuDerivModel_Ca_HVA(dt, v[count],state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , ica[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuDerivModel_Ca_LVAst(dt, v[count],state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , ica[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuDerivModel_CaDynamics_E2(dt, v[count],cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , ica[count] ,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuDerivModel_Ih(dt, v[count],state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuDerivModel_Im(dt, v[count],state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuDerivModel_K_Pst(dt, v[count],state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuDerivModel_K_Tst(dt, v[count],state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuDerivModel_Nap_Et2(dt, v[count],state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuDerivModel_NaTa_t(dt, v[count],state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuDerivModel_NaTs2_t(dt, v[count],state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){}if(cBoolModel[PIdx[count] +11*NSEG]){CuDerivModel_SK_E2(dt, v[count],state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuDerivModel_SKv3_1(dt, v[count],state_macro(18,count) ,param_macro(18, PIdx[count]) );} } } //This one looks suspicious but leaving it and will check it later. 
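// After the last time step: copy the final WARPSIZE-sized block of staged voltage samples from shared memory to the global output buffer VHotGlobal.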
for (int recInd = 0; recInd<sim.NRecSites; recInd++) { VHotGlobal[NeuronID*(sim.NRecSites*Nt*blockDim.y) + threadIdx.y*Nt*sim.NRecSites + recInd*Nt + Nt - WARPSIZE + PIdx[1]] = SMemVHot[WARPSIZE*recInd + PIdx[1]]; } } __global__ void NeuroGPUKernel(Stim stim, MYFTYPE* ParamsM, MYFTYPE* ModelStates, Sim sim, HMat InMat, MYFTYPE *V, MYFTYPE* VHotGlobal, MYDTYPE CompDepth, MYDTYPE CompFDepth) { MYFTYPE *amps, *SMemVHot; MYDTYPE offset = 0; runSimulation(InMat, ParamsM, ModelStates, V, stim, sim, VHotGlobal); } void ReadParamsMatX(const char* FN, MYFTYPE* ParamsM, MYDTYPE NParams, MYDTYPE Nx) { char FileName[300]; sprintf(FileName, "%sForC.mat", FN); FILE *fl = fopen(FileName, "rb"); // YYY add FILE* if (!fl) { printf("Failed to read TreeData.x\n"); return; } fread(ParamsM, sizeof(MYFTYPE), Nx*NParams, fl); fclose(fl); } void ReadParamsMat(const char* FN, MYFTYPE** ParamsM, MYDTYPE NParams, MYDTYPE Nx) { char FileName[300]; //sprintf(FileName,"%s%d.mat",FN,MUL32*32); sprintf(FileName, "%sForC.mat", FN); FILE *fl = fopen(FileName, "rb"); // YYY add FILE* if (!fl) { printf("Failed to read TreeData.x\n"); return; } for (int i = 0; i<NParams; i++) { ParamsM[i] = (MYFTYPE*)malloc(Nx * sizeof(MYFTYPE)); fread(ParamsM[i], sizeof(MYFTYPE), Nx, fl); } fclose(fl); } void initFrameWork(Stim stim, Sim sim, MYFTYPE* ParamsM, MYFTYPE* InitStatesM, HMat& InMat, MYDTYPE CompDepth, MYDTYPE CompFDepth, MYDTYPE NSets, HMat& Mat_d) { printf("in initframework\n"); hipError_t cudaStatus; int i, j, t; // For matrix - MYFTYPE *PXOut_d, *PFOut_d; MYFTYPE *uHPOut_d, *bHPOut_d; Mat_d.N = InMat.N; Mat_d.NComps = InMat.NComps; Mat_d.Depth = InMat.Depth; Mat_d.NModels = InMat.NModels; Mat_d.LognDepth = InMat.LognDepth; Mat_d.nFathers = InMat.nFathers; Mat_d.nCallForFather = InMat.nCallForFather; Mat_d.nLRel = InMat.nLRel; Mat_d.nFLRel = InMat.nFLRel; // 32 data #ifdef BKSUB1 //cudaStatus = hipMalloc((void**)&Mat_d.FIdxs, InMat.LognDepth*InMat.N* sizeof(MYDTYPE)); #endif #ifdef BKSUB2 cudaStatus = hipMalloc((void**)&Mat_d.KsB, (InMat.N + 1) * sizeof(MYDTYPE)); #endif CUDA_RT_CALL(hipMemcpyToSymbol(cE, InMat.e, InMat.N * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cF, InMat.f, InMat.N * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cKs, InMat.Ks, InMat.N * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cSegToComp, InMat.SegToComp, InMat.N * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cBoolModel, InMat.boolModel, InMat.N * InMat.NModels * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cCm, InMat.Cms, InMat.N * sizeof(MYFTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cSonNoVec, InMat.SonNoVec, InMat.N * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cRelStarts, InMat.RelStarts, InMat.nFathers * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cRelEnds, InMat.RelEnds, InMat.nFathers * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cRelVec, InMat.RelVec, InMat.nCallForFather * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMemcpyToSymbol(cSegStartI, InMat.SegStartI, (InMat.nCallForFather + 1) * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.SegEndI, InMat.SegEndI, (InMat.nCallForFather+1)* sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cSegEndI, InMat.SegEndI, (InMat.nCallForFather + 1) * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.Fathers, InMat.Fathers, InMat.nFathers * sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cFathers, InMat.Fathers, InMat.nFathers * sizeof(MYDTYPE))); // 32 data #ifdef BKSUB1 //CUDA_RT_CALL(hipMemcpy(Mat_d.FIdxs, 
InMat.FIdxs, InMat.LognDepth*InMat.N* sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cFIdxs, InMat.FIdxs, InMat.LognDepth*InMat.N * sizeof(MYDTYPE))); #endif #ifdef BKSUB2 CUDA_RT_CALL(hipMemcpy(Mat_d.KsB, InMat.KsB, (InMat.N + 1) * sizeof(MYDTYPE), hipMemcpyHostToDevice); #endif //CUDA_RT_CALL(hipMemcpy(Mat_d.CompByLevel32, InMat.CompByLevel32, (CompDepth)*WARPSIZE*sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cCompByLevel32, InMat.CompByLevel32, (CompDepth)*WARPSIZE * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.CompByFLevel32, InMat.CompByFLevel32, (CompFDepth)*WARPSIZE*sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cCompByFLevel32, InMat.CompByFLevel32, (CompFDepth)*WARPSIZE * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.LRelStarts, InMat.LRelStarts,InMat.nLRel*sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cLRelStarts, InMat.LRelStarts, InMat.nLRel * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.LRelEnds, InMat.LRelEnds,InMat.nLRel*sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cLRelEnds, InMat.LRelEnds, InMat.nLRel * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.FLRelStarts, InMat.FLRelStarts,InMat.nFLRel*sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cFLRelStarts, InMat.FLRelStarts, InMat.nFLRel * sizeof(MYDTYPE))); //CUDA_RT_CALL(hipMemcpy(Mat_d.FLRelEnds, InMat.FLRelEnds,InMat.nFLRel*sizeof(MYDTYPE), hipMemcpyHostToDevice); CUDA_RT_CALL(hipMemcpyToSymbol(cFLRelEnds, InMat.FLRelEnds, InMat.nFLRel * sizeof(MYDTYPE))); CUDA_RT_CALL(hipMalloc((void**)&PXOut_d, (InMat.N + 1) * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(hipMalloc((void**)&PFOut_d, (InMat.N + 1) * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(hipDeviceSynchronize()); printf("done with all init framework\n"); } void callKernel(Stim stim, Sim sim, MYFTYPE* ParamsM, MYFTYPE* InitStatesM, HMat& Mat_d, MYFTYPE* V, MYDTYPE CompDepth, MYDTYPE CompFDepth, MYDTYPE prevRuns, MYDTYPE currKernelRun, MYFTYPE* VHotsHost) { MYDTYPE Nt = stim.Nt; MYFTYPE *d_modelParams, *d_modelStates; MYFTYPE *VHotsGlobal; MYFTYPE *V_d; CUDA_RT_CALL(hipMalloc((void**)&VHotsGlobal, currKernelRun*sim.NRecSites*Nt *stim.NStimuli * sizeof(MYFTYPE))); int memSizeForVHotGlobal = Nt*stim.NStimuli*sim.NRecSites; MYDTYPE memSizeForModelParams = NPARAMS * Mat_d.NComps; MYDTYPE memSizeForInitStatae = NSTATES * Mat_d.NComps; CUDA_RT_CALL(hipMalloc((void**)&V_d, Mat_d.N * sizeof(MYFTYPE))); CUDA_RT_CALL(hipMemcpy(V_d, V, Mat_d.N * sizeof(MYFTYPE), hipMemcpyHostToDevice)); Stim stim_d; stim_d.NStimuli = stim.NStimuli; stim_d.comp = stim.comp; stim_d.area = stim.area; stim_d.loc = stim.loc; stim_d.Nt = stim.Nt; Sim sim_d; sim_d.Celsius = sim.Celsius; sim_d.dt = sim.dt; sim_d.NRecSites = sim.NRecSites; sim_d.TFinal = sim.TFinal; #ifdef STIMFROMCSV printf("in mallocing loop\n******\n"); CUDA_RT_CALL(hipMalloc((void**)&stim_d.durs, stim_d.Nt * sizeof(MYFTYPE))); CUDA_RT_CALL(hipMalloc((void**)&stim_d.amps, stim_d.Nt*stim.NStimuli * sizeof(MYFTYPE))); #endif // STIMFROMFILE CUDA_RT_CALL(hipMalloc((void**)&sim_d.RecSites, sim_d.NRecSites * sizeof(MYDTYPE))); #ifdef STIMFROMCSV CUDA_RT_CALL(hipMemcpy(stim_d.durs, stim.durs, stim_d.Nt * sizeof(MYFTYPE), hipMemcpyHostToDevice)); CUDA_RT_CALL(hipMemcpy(stim_d.amps, stim.amps, stim_d.Nt*stim.NStimuli * sizeof(MYFTYPE), hipMemcpyHostToDevice)); #endif // stimf CUDA_RT_CALL(hipMemcpy(sim_d.RecSites, sim.RecSites, sim_d.NRecSites * sizeof(MYDTYPE), hipMemcpyHostToDevice)); 
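// Remaining device setup: optionally upload kinetic initial states (NKIN_STATES), copy this batch's parameter sets into d_modelParams, then launch NeuroGPUKernel with gridDim = currKernelRun blocks and blockDim = (WARPSIZE, NStimuli), copying the recorded voltages back with an asynchronous memcpy.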
#ifdef NKIN_STATES MYFTYPE *d_initStates; CUDA_RT_CALL(hipMalloc((void**)&d_initStates, NSTATES * InMat.NComps *NSets * sizeof(MYFTYPE)); CUDA_RT_CALL(hipMemcpy(d_initStates, InitStatesM, NSTATES * InMat.NComps * NSets * sizeof(MYFTYPE), hipMemcpyHostToDevice); #endif CUDA_RT_CALL(hipMalloc((void**)&d_modelParams, NPARAMS * Mat_d.NComps *currKernelRun * sizeof(MYFTYPE))); CUDA_RT_CALL(hipMemcpy(d_modelParams, &ParamsM[prevRuns*memSizeForModelParams], NPARAMS * Mat_d.NComps * currKernelRun * sizeof(MYFTYPE), hipMemcpyHostToDevice)); CUDA_RT_CALL(hipMalloc((void**)&d_modelStates, (NSTATES + 1) * (NSEG) * currKernelRun * sizeof(MYFTYPE))); dim3 blockDim(WARPSIZE, stim.NStimuli); dim3 gridDim(currKernelRun); #ifdef NKIN_STATES if (streamID == 0) { NeuroGPUKernel << <currKernelRun, blockDim, TotalSMem, stream0 >> > (stim_d, &d_modelParams[prevRuns*memSizeForModelParams], &d_initStates[prevRuns*memSizeForInitStatae], sim_d, Mat_d, V_d, &VHotsGlobal[prevRuns*memSizeForVHotGlobal], CompDepth, CompFDepth); // RRR CUDA_RT_CALL(hipMemcpyAsync(&VHotsHost[prevRuns*memSizeForVHotGlobal], &VHotsGlobal[prevRuns*memSizeForVHotGlobal], currKernelRun * Nt * sim.NRecSites * stim.NStimuli * sizeof(MYFTYPE), hipMemcpyDeviceToHost, stream0); printf("dev id is %d, cudastatus is %s\n", currDevice, cudaStatus); } #endif //#ifndef NKIN_STATES printf("kernel not ran yet\n"); NeuroGPUKernel << <currKernelRun, blockDim >> >(stim_d, d_modelParams, d_modelStates, sim_d, Mat_d, V_d, VHotsGlobal, CompDepth, CompFDepth); // RRR printf("kernel ran before memcpyasync currkernel run is %d\n", currKernelRun); CUDA_RT_CALL(hipMemcpyAsync(VHotsHost, VHotsGlobal, currKernelRun * Nt * sim.NRecSites * stim.NStimuli * sizeof(MYFTYPE), hipMemcpyDeviceToHost)); printf("done copying*&*&*&*&*&*&*\n"); } void stEfork2Main(Stim stim, Sim sim, MYFTYPE* ParamsM, MYFTYPE* InitStatesM, HMat& InMat, MYFTYPE* V, MYDTYPE CompDepth, MYDTYPE CompFDepth, int NSets, int* p2pCapableGPUs, int np2p) { MYFTYPE *Vhots; MYFTYPE **vhots_dev; MYDTYPE Nt = stim.Nt; printf("in stefork\n"); vhots_dev = (MYFTYPE**)(malloc(np2p * sizeof(MYFTYPE*))); Vhots = (MYFTYPE*)malloc(NSets*Nt*stim.NStimuli*sim.NRecSites * sizeof(MYFTYPE)); HMat Mat_d; if (np2p == 0) { np2p = 1; } for (int i = 0; i < np2p; i++) { printf("calling initframework p2pCapableGPUs[i] is %d\n", p2pCapableGPUs[i]); CUDA_RT_CALL(hipSetDevice(p2pCapableGPUs[i])); // hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); initFrameWork(stim, sim, ParamsM, InitStatesM, InMat, CompDepth, CompFDepth, NSets, Mat_d); } MYDTYPE prevRuns = 0; MYDTYPE currRun; if (NSets > np2p) { currRun = ceil(NSets / np2p); } else { currRun = NSets; }; printf("done initframework dev0 curr Kernel is %d\n", currRun); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); for (int i = 0; i < np2p; i++) { if (prevRuns >= NSets) break; CUDA_RT_CALL(hipSetDevice(p2pCapableGPUs[i])); printf("calling kernel dev%d\n", p2pCapableGPUs[i]); hipHostMalloc((void**)&vhots_dev[i], stim.NStimuli*Nt*sim.NRecSites*currRun * sizeof(MYFTYPE)); callKernel(stim, sim, ParamsM, InitStatesM, Mat_d, V, CompDepth, CompFDepth, prevRuns, currRun, vhots_dev[i]); prevRuns += currRun; } for (int i = 0; i < np2p; i++) { CUDA_RT_CALL(hipSetDevice(p2pCapableGPUs[i])); CUDA_RT_CALL(hipDeviceSynchronize()); printf("done synch%d\n", p2pCapableGPUs[i]); if (NSets <np2p) { printf("nsets >p2pdevs"); Vhots = vhots_dev[0]; } else { memcpy(&Vhots[(currRun*i)*stim.NStimuli*Nt*sim.NRecSites], vhots_dev[i], 
stim.NStimuli*Nt*sim.NRecSites*currRun * sizeof(MYFTYPE)); } } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("it took %f ms\n", milliseconds); FILE *file = fopen(TIMES_FN, "w"); if (file) { fprintf(file, "%d,%f\n", NSets, milliseconds); } else { printf("ERR SaveArrayToFile %s\n", TIMES_FN); } fclose(file); SaveArrayToFile(VHOT_OUT_FN_P, NSets*Nt*stim.NStimuli*sim.NRecSites, Vhots); }
9843af0b118079002aab91e2fa67357b46946a7d.cu
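// Descriptive note (not in the original source): original CUDA source of the NeuroGPU simulation kernel — one block per neuron/parameter set, per-step channel evaluation plus a branched (Hines-style) solve via BeforeLU/BkSub, with recorded voltages staged in shared memory.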
#include "CudaStuff.cuh" //#include "AllModels.cu" #include "AllModels.cuh" #define ILP16 __constant__ MYFTYPE cCm[NSEG]; __constant__ MYSECONDFTYPE cE[NSEG]; __constant__ MYSECONDFTYPE cF[NSEG]; __constant__ MYDTYPE cFIdxs[NSEG*LOG_N_DEPTH]; __constant__ MYDTYPE cKs[NSEG]; __constant__ MYDTYPE cSegToComp[NSEG]; __constant__ MYDTYPE cBoolModel[NSEG * N_MODELS];//One day change this to bool __constant__ MYDTYPE cRelStarts[N_FATHERS];//nFathers __constant__ MYDTYPE cRelEnds[N_FATHERS];//nFathers __constant__ MYDTYPE cFathers[N_FATHERS];//nFathers __constant__ MYDTYPE cRelVec[N_CALL_FOR_FATHER];//nCallForFather __constant__ MYDTYPE cSegStartI[N_CALL_FOR_FATHER + 1];//nCallForFather __constant__ MYDTYPE cSegEndI[N_CALL_FOR_FATHER + 1];//nCallForFather __constant__ MYDTYPE cCompByLevel32[COMP_DEPTH*WARPSIZE];//CompDepth __constant__ MYDTYPE cCompByFLevel32[COMP_DEPTH*WARPSIZE];//CompFDepth __constant__ MYDTYPE cLRelStarts[N_L_REL];//nLRel __constant__ MYDTYPE cLRelEnds[N_L_REL];//nLRel __constant__ MYDTYPE cFLRelStarts[N_F_L_REL];//nFLRel __constant__ MYDTYPE cFLRelEnds[N_F_L_REL];//nFLRel __constant__ MYDTYPE cSonNoVec[NSEG];//InMat.N #ifdef __INTELLISENSE__ void __syncthreads(); #endif __device__ void BeforeLU(HMat InMat, MYSECONDFTYPE* uHP, MYSECONDFTYPE* bHP, MYDTYPE Depth) { MYDTYPE PIdx = threadIdx.x; MYDTYPE i, j, CurJ, CurB, t, CurLevel, LRelIndex; MYDTYPE JumctionI; LRelIndex = cLRelStarts[CurLevel]; LRelIndex = LRelIndex + cLRelEnds[CurLevel]; for (CurLevel = 0; CurLevel <= Depth; CurLevel++) { for (LRelIndex = cLRelStarts[CurLevel]; LRelIndex <= cLRelEnds[CurLevel]; LRelIndex++) { //for(LRelIndex=cLRelStarts[CurLevel];LRelIndex<=InMat.LRelEnds[CurLevel];LRelIndex++){ JumctionI = cCompByLevel32[LRelIndex*WARPSIZE + PIdx] - 1; for (i = cSegStartI[JumctionI] - 1; i<cSegEndI[JumctionI]; i++) { MYSECONDFTYPE uHPm1 = uHP[i - 1]; uHP[i] = uHP[i] - cF[i - 1] * (cE[i - 1] / uHPm1); // So far same as paper parallel uHPm1 = uHP[i - 1]; MYSECONDFTYPE bHPm1 = bHP[i - 1]; bHP[i] = bHP[i] - bHPm1*cE[i - 1] / uHPm1; // bH is y } } if (CurLevel<Depth) { for (LRelIndex = cFLRelStarts[CurLevel]; LRelIndex <= cFLRelEnds[CurLevel]; LRelIndex++) { CurB = cCompByFLevel32[(LRelIndex)*WARPSIZE + PIdx] - 1;//RB i inserted another -1 into the index RB 2 i removed the-1 from the curlevel CurJ = cFathers[CurB] - 1; MYDTYPE St = cRelStarts[CurB]; MYDTYPE En = cRelEnds[CurB]; for (j = St; j <= En; j++) { t = cRelVec[j - 1] - 1; MYSECONDFTYPE uHPm1 = uHP[t - 1]; uHP[CurJ] -= cF[t - 1] * (cE[t - 1] / uHPm1); uHPm1 = uHP[t - 1]; MYSECONDFTYPE bHPm1 = bHP[t - 1]; bHP[CurJ] -= bHPm1*cE[t - 1] / uHPm1; } } } } } #ifdef BKSUB1 __device__ void BkSub(HMat InMat, MYSECONDFTYPE* PX, MYSECONDFTYPE* PF, MYSECONDFTYPE* uHP, MYSECONDFTYPE* bHP, MYDTYPE LognDepth) { // MYDTYPE PIdx_1=threadIdx.x; // MYDTYPE NextID_1; MYDTYPE PIdx[NILP + 1]; MYDTYPE NextID[NILP + 1]; for (int count = 1; count < NILP + 1; count++) { PIdx[count] = threadIdx.x + (WARPSIZE*(count - 1)); //this is from a different superilp PX[PIdx[count]] = PX[PIdx[count]] / PF[PIdx[count]]; PF[PIdx[count]] = -cF[PIdx[count]] / PF[PIdx[count]]; } MYFTYPE OldPXj[NILP + 1]; MYFTYPE OldPXNextID[NILP + 1]; MYFTYPE OldPFj[NILP + 1]; MYFTYPE OldPFNextID[NILP + 1]; MYDTYPE i; PX[InMat.N] = 0; PF[InMat.N] = 1; for (i = 0; i<LognDepth; i++) { for (int count = 1; count < NILP + 1; count++) { NextID[count] = cFIdxs[i*InMat.N + PIdx[count]] - 1; OldPXj[count] = PX[PIdx[count]]; OldPXNextID[count] = PX[NextID[count]]; PX[PIdx[count]] = OldPXj[count] + OldPXNextID[count] * 
PF[PIdx[count]]; } for (int count = 1; count < NILP + 1; count++) { OldPFj[count] = PF[PIdx[count]]; OldPFNextID[count] = PF[NextID[count]]; PF[PIdx[count]] = OldPFj[count] * OldPFNextID[count]; } } } #endif #ifdef BKSUB2 __device__ void BkSub(HMat InMat, MYSECONDFTYPE* uHP, MYSECONDFTYPE* bHP, MYSECONDFTYPE* Out, MYDTYPE Depth) { // MYDTYPE PIdx_3=threadIdx.x+(WARPSIZE*2); Out[PIdx_2]=0; // might be useless? #define THISCOMMANDHEREB1(VARILP) MYDTYPE PIdx_ ## VARILP =threadIdx.x+(WARPSIZE*( ## VARILP -1)); Out[PIdx_ ## VARILP ]=0; MYDTYPE PIdx[1] = threadIdx.x + (WARPSIZE*([1] - 1)); Out[PIdx[1]] = 0; MYDTYPE j, CurJ, CurB, t; MYDTYPE JumctionI; short CurLevel, i; // get KsB from MATLAB (this comes instead of FIdxsX) // KsB=Ks; // do in matlab // bPX=zeros(1,N); // might be useless // for CurLevel=Depth:-1:0 MYDTYPE LRelIndex, k; MYFTYPE temp; for (CurLevel = Depth; CurLevel >= 0; CurLevel--) { // Run all independent set for this level, in parallel // for JumctionI=find(Level==CurLevel) % in parallel for (LRelIndex = cLRelStarts[CurLevel]; LRelIndex <= cLRelEnds[CurLevel]; LRelIndex++) { JumctionI = cCompByLevel32[LRelIndex*WARPSIZE + PIdx_1] - 1; // for i=(cSegEndI(JumctionI)):-1:(cSegStartI(JumctionI)-1) for (i = cSegEndI[JumctionI] - 1; i >= (cSegStartI[JumctionI] - 2); i--) { // k=cKsB(i+1); k = cKsB[i + 1]; // bPX(i)=(bH(i)-bPX(k)*f(i))/uH(i); Out[i] = (bHP[i] - Out[k] * cF[i]) / uHP[i]; } } } } #endif __device__ void runSimulation(HMat InMat, const MYFTYPE* __restrict__ ParamsM, MYFTYPE* ModelStates, MYFTYPE* V, Stim stim, Sim sim, MYFTYPE* VHotGlobal) { __shared__ MYSECONDFTYPE uHP_all[(NSEG + 2)*NTRACES]; __shared__ MYSECONDFTYPE bHP_all[(NSEG + 2)*NTRACES]; __shared__ MYFTYPE SMemVHot_all[WARPSIZE*NTRACES]; MYSECONDFTYPE *uHP = &uHP_all[(NSEG + 2)*threadIdx.y]; MYSECONDFTYPE *bHP = &bHP_all[(NSEG + 2)*threadIdx.y]; MYFTYPE *SMemVHot = &SMemVHot_all[(WARPSIZE)*threadIdx.y]; MYDTYPE StimID = threadIdx.y; //MYDTYPE PerStimulus; //PerStimulus = InMat.N+2; MYDTYPE NeuronID = blockIdx.x; int Nt = stim.Nt; MYFTYPE t = 0; MYSECONDFTYPE *PX, *PF; PX = bHP; PF = uHP; MYDTYPE PIdx[NILP + 1]; for (int count = 1; count < NILP + 1; count++) { PIdx[count] = threadIdx.x + (WARPSIZE*(count - 1)); } int perBlockStatesSize = (NSEG)*(NSTATES + 1); #define state_macro(stateind,segmentInd) ModelStates[NeuronID*perBlockStatesSize + stateind*NSEG+PIdx[segmentInd]]//Is this coalesced? 
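// state_macro addresses this neuron's block of ModelStates: stride perBlockStatesSize per neuron, NSEG per state variable, with PIdx[segmentInd] giving the segment index handled by this thread for that ILP slot.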
MYFTYPE Vmid[NILP + 1]; MYFTYPE v[NILP + 1]; MYFTYPE dv[NILP + 1]; MYSECONDFTYPE sumCurrents[NILP + 1]; MYSECONDFTYPE sumCurrentsDv[NILP + 1]; MYFTYPE sumConductivity[NILP + 1]; MYFTYPE sumConductivityDv[NILP + 1]; MYDTYPE parentIndex[NILP + 1]; MYDTYPE Eidx[NILP + 1]; MYSECONDFTYPE rhs[NILP + 1]; MYSECONDFTYPE D[NILP + 1]; MYFTYPE gModel[NILP + 1]; MYFTYPE cai[NILP + 1]; MYFTYPE ica[NILP + 1]; MYFTYPE eca[NILP + 1]; MYFTYPE StimCurrent[NILP + 1]; for (int count = 1; count < NILP + 1; count++) { v[count] = V[PIdx[count]]; sumCurrents[count] = 0; sumCurrentsDv[count] = 0; sumConductivity[count] = 0; sumConductivityDv[count] = 0; bHP[count] = 0; dv[count] = 0; Eidx[count] = InMat.N - PIdx[count] - 1; parentIndex[count] = InMat.N - cKs[InMat.N - PIdx[count]]; cai[count] = 0; ica[count] = 0; eca[count] = 0; if (PIdx[count] == 0) { parentIndex[count] = 0; }; for (int count1 = 0; count1 < NSTATES; count1++) { state_macro(count1, count) = 0; } } if (Eidx[1]>InMat.N - 1) { Eidx[1] = InMat.N - 1; } MYDTYPE perThreadParamMSize = InMat.NComps*NPARAMS; #define param_macro(paramInd,segmentInd) ParamsM[NeuronID*perThreadParamMSize + paramInd*InMat.NComps+cSegToComp[segmentInd] ] #ifdef NKIN_STATES MYDTYPE perThreadStateMSize = InMat.NComps*NKIN_STATES; #define init_state_macro(stateInd,segmentInd) InitStatesM[NeuronID*perThreadStateMSize + stateInd*InMat.NComps+cSegToComp[segmentInd] ] ; SUPERILPMACRO(SET_KINETIC_STATE) #endif for (int count = 1; count < NILP + 1; count++) { if(cBoolModel[PIdx[count] +0*NSEG]){CuInitModel_Ca_HVA(v[count],state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuInitModel_Ca_LVAst(v[count],state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuInitModel_CaDynamics_E2(v[count],cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , ica[count] ,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuInitModel_Ih(v[count],state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuInitModel_Im(v[count],state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuInitModel_K_Pst(v[count],state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuInitModel_K_Tst(v[count],state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuInitModel_Nap_Et2(v[count],state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuInitModel_NaTa_t(v[count],state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuInitModel_NaTs2_t(v[count],state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){CuInitModel_pas(v[count],param_macro(14, PIdx[count]) ,param_macro(15, PIdx[count]) );}if(cBoolModel[PIdx[count] +11*NSEG]){CuInitModel_SK_E2(v[count],state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuInitModel_SKv3_1(v[count],state_macro(18,count) ,param_macro(18, PIdx[count]) );} } MYDTYPE stimLoc = stim.loc; MYFTYPE stimArea = stim.area; MYDTYPE dtCounter = 0; MYFTYPE dt = sim.dt; 
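// Main integration loop: each step evaluates the channel currents at v and at v + 0.001 to linearize the conductances, builds the right-hand side (bHP) and diagonal (uHP) of the branched system, solves it with BeforeLU + BkSub, and then advances the channel state variables with the CuDerivModel_* routines.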
MYFTYPE temp; for (int i = 0; i<Nt; i++) { #ifdef STIMFROMCSV dt = stim.durs[i]; #endif STIMFROMCSV t += 0.5*dt; if ((i % (WARPSIZE) == 0)) { if (i>0) { for (int recInd = 0; recInd<sim.NRecSites; recInd++) { VHotGlobal[NeuronID*(sim.NRecSites*Nt*blockDim.y) + threadIdx.y*Nt*sim.NRecSites + recInd*Nt + (i - WARPSIZE) + PIdx[1]] = SMemVHot[WARPSIZE*recInd + PIdx[1]]; } } //amps[PIdx[1]] = stim.amps[threadIdx.y*Nt + i + PIdx[1]]; } for (int recInd = 0; recInd<sim.NRecSites; recInd++) { if (sim.RecSites[recInd] % WARPSIZE == threadIdx.x) //This is done by all threads why??? SMemVHot[recInd*WARPSIZE + i % (WARPSIZE)] = v[1];//This is going to be challenging to make it general but possible. } for (int count = 1; count < NILP + 1; count++) { rhs[count] = 0; D[count] = 0; sumCurrents[count] = 0; sumConductivity[count] = 0; sumCurrentsDv[count] = 0; sumConductivityDv[count] = 0; StimCurrent[count] = 0; ica[count] = 0;//SERIOUSLY??? check if this is correct does not seem right!!!! the whole point of ica is not to be initialized every time step... if (PIdx[count] == stimLoc) { StimCurrent[count] = 100 * stim.amps[threadIdx.y*Nt + i] / stimArea; } } for (int count = 1; count < NILP + 1; count++) { if(cBoolModel[PIdx[count] +0*NSEG]){CuBreakpointModel_Ca_HVA(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , temp,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuBreakpointModel_Ca_LVAst(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , temp,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuBreakpointModel_CaDynamics_E2(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , temp,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuBreakpointModel_Ih(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuBreakpointModel_Im(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuBreakpointModel_K_Pst(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuBreakpointModel_K_Tst(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuBreakpointModel_Nap_Et2(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuBreakpointModel_NaTa_t(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuBreakpointModel_NaTs2_t(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){CuBreakpointModel_pas(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,param_macro(14, PIdx[count]) ,param_macro(15, PIdx[count]) 
);}if(cBoolModel[PIdx[count] +11*NSEG]){CuBreakpointModel_SK_E2(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuBreakpointModel_SKv3_1(sumCurrentsDv[count] , sumConductivityDv[count] ,v[count] +0.001,state_macro(18,count) ,param_macro(18, PIdx[count]) );} if(cBoolModel[PIdx[count] +0*NSEG]){CuBreakpointModel_Ca_HVA(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuBreakpointModel_Ca_LVAst(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , ica[count] ,eca[count] ,cai[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuBreakpointModel_CaDynamics_E2(sumCurrents[count] , sumConductivity[count],v[count] ,cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , ica[count] ,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuBreakpointModel_Ih(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuBreakpointModel_Im(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuBreakpointModel_K_Pst(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuBreakpointModel_K_Tst(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuBreakpointModel_Nap_Et2(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuBreakpointModel_NaTa_t(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuBreakpointModel_NaTs2_t(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){CuBreakpointModel_pas(sumCurrents[count] , sumConductivity[count],v[count] ,param_macro(14, PIdx[count]) ,param_macro(15, PIdx[count]) );}if(cBoolModel[PIdx[count] +11*NSEG]){CuBreakpointModel_SK_E2(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuBreakpointModel_SKv3_1(sumCurrents[count] , sumConductivity[count],v[count] ,state_macro(18,count) ,param_macro(18, PIdx[count]) );} } for (int count = 1; count < NILP + 1; count++) { gModel[count] = (sumCurrentsDv[count] - sumCurrents[count]) / EPS_V; rhs[count] = StimCurrent[count] - sumCurrents[count]; D[count] = gModel[count] + cCm[PIdx[count]] / (dt * 1000); D[count] -= cF[InMat.N - PIdx[count] - 1]; dv[count] += bHP[InMat.N - parentIndex[count] - 1] - bHP[InMat.N - PIdx[count] - 1]; } for (int count = 1; count < NILP + 1; count++) { rhs[count] -= cF[InMat.N - PIdx[count] - 1] * dv[count]; bHP[InMat.N - PIdx[count] - 1] = 
rhs[count]; uHP[InMat.N - PIdx[count] - 1] = D[count]; } //__syncthreads(); for (int count = 1; count < NILP + 1; count++) { if (cSonNoVec[PIdx[count]] == 1) { bHP[InMat.N - parentIndex[count] - 1] += cE[Eidx[count]] * dv[count]; uHP[InMat.N - parentIndex[count] - 1] -= cE[Eidx[count]]; }//WILL THIS WORK??? if (cSonNoVec[PIdx[count]] == 2) { bHP[InMat.N - parentIndex[count] - 1] += cE[Eidx[count]] * dv[count]; uHP[InMat.N - parentIndex[count] - 1] -= cE[Eidx[count]]; } } //__syncthreads(); BeforeLU(InMat, uHP, bHP, InMat.Depth); #ifdef BKSUB1 //__syncthreads(); BkSub(InMat, PX, PF, uHP, bHP, InMat.LognDepth); //__syncthreads(); for (int count = 1; count < NILP + 1; count++) { Vmid[count] = PX[InMat.N - PIdx[count] - 1]; v[count] += Vmid[count]; } #endif #ifdef BKSUB2 #define THISCOMMANDHERE37(VARILP) MYFTYPE vTemp_ ## VARILP=Vs[PIdx_ ## VARILP]; MYFTYPE vTemp[1] = Vs[PIdx[1]]; __syncthreads(); BkSub(InMat, uHP, bHP, Vs, InMat.Depth); __syncthreads(); PX = Vs; #define THISCOMMANDHERE38(VARILP) Vmid_ ## VARILP =PX[InMat.N-PIdx_ ## VARILP -1]; v_ ## VARILP +=Vmid_ ## VARILP ; Vmid[1] = PX[InMat.N - PIdx[1] - 1]; v[1] += Vmid[1]; __syncthreads(); #define THISCOMMANDHERE39(VARILP) Vs[PIdx_ ## VARILP ]= vTemp_ ## VARILP +Vmid_ ## VARILP ; Vs[PIdx[1]] = vTemp[1] + Vmid[1]; #endif t+=0.5*dt; // if(InMat.boolModel[PIdx_1 +0*InMat.N]){CuDerivModel_ca(dt, v_1,ModelStates_1[0],ModelStates_1[1],p0_1 ,p1_1 ,ModelStates_1[8],ModelStates_1[9]);} if(InMat.boolModel[PIdx_1 +1*InMat.N]){CuDerivModel_cad(dt, v_1,ModelStates_1[2],ModelStates_1[9],ModelStates_1[8]);} if(InMat.boolModel[PIdx_1 +2*InMat.N]){CuDerivModel_kca(dt, v_1,ModelStates_1[3],p2_1 ,p3_1 ,p4_1 ,p5_1 ,ModelStates_1[8]);} if(InMat.boolModel[PIdx_1 +3*InMat.N]){CuDerivModel_km(dt, v_1,ModelStates_1[4],p6_1 ,p7_1 ,p8_1 ,p9_1 ,p10_1 );} if(InMat.boolModel[PIdx_1 +4*InMat.N]){CuDerivModel_kv(dt, v_1,ModelStates_1[5],p11_1 ,p12_1 ,p13_1 ,p14_1 ,p15_1 );} if(InMat.boolModel[PIdx_1 +5*InMat.N]){CuDerivModel_na(dt, v_1,ModelStates_1[6],ModelStates_1[7],p16_1 ,p17_1 ,p18_1 ,p19_1 ,p20_1 ,p21_1 ,p22_1 ,p23_1 ,p24_1 ,p25_1 ,p26_1 ,p27_1 );} if(InMat.boolModel[PIdx_2 +0*InMat.N]){CuDerivModel_ca(dt, v_2,ModelStates_2[0],ModelStates_2[1],p0_2 ,p1_2 ,ModelStates_2[8],ModelStates_2[9]);} if(InMat.boolModel[PIdx_2 +1*InMat.N]){CuDerivModel_cad(dt, v_2,ModelStates_2[2],ModelStates_2[9],ModelStates_2[8]);} if(InMat.boolModel[PIdx_2 +2*InMat.N]){CuDerivModel_kca(dt, v_2,ModelStates_2[3],p2_2 ,p3_2 ,p4_2 ,p5_2 ,ModelStates_2[8]);} if(InMat.boolModel[PIdx_2 +3*InMat.N]){CuDerivModel_km(dt, v_2,ModelStates_2[4],p6_2 ,p7_2 ,p8_2 ,p9_2 ,p10_2 );} if(InMat.boolModel[PIdx_2 +4*InMat.N]){CuDerivModel_kv(dt, v_2,ModelStates_2[5],p11_2 ,p12_2 ,p13_2 ,p14_2 ,p15_2 );} if(InMat.boolModel[PIdx_2 +5*InMat.N]){CuDerivModel_na(dt, v_2,ModelStates_2[6],ModelStates_2[7],p16_2 ,p17_2 ,p18_2 ,p19_2 ,p20_2 ,p21_2 ,p22_2 ,p23_2 ,p24_2 ,p25_2 ,p26_2 ,p27_2 );} if(InMat.boolModel[PIdx_3 +0*InMat.N]){CuDerivModel_ca(dt, v_3,ModelStates_3[0],ModelStates_3[1],p0_3 ,p1_3 ,ModelStates_3[8],ModelStates_3[9]);} if(InMat.boolModel[PIdx_3 +1*InMat.N]){CuDerivModel_cad(dt, v_3,ModelStates_3[2],ModelStates_3[9],ModelStates_3[8]);} if(InMat.boolModel[PIdx_3 +2*InMat.N]){CuDerivModel_kca(dt, v_3,ModelStates_3[3],p2_3 ,p3_3 ,p4_3 ,p5_3 ,ModelStates_3[8]);} if(InMat.boolModel[PIdx_3 +3*InMat.N]){CuDerivModel_km(dt, v_3,ModelStates_3[4],p6_3 ,p7_3 ,p8_3 ,p9_3 ,p10_3 );} if(InMat.boolModel[PIdx_3 +4*InMat.N]){CuDerivModel_kv(dt, v_3,ModelStates_3[5],p11_3 ,p12_3 ,p13_3 ,p14_3 ,p15_3 );} 
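/* Note: the PIdx_1/_2/_3, v_*, ModelStates_* and p*_* names used in this block come from an older per-suffix code layout and are not declared in this kernel; the active state update is the array-indexed loop that follows. */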
if(InMat.boolModel[PIdx_3 +5*InMat.N]){CuDerivModel_na(dt, v_3,ModelStates_3[6],ModelStates_3[7],p16_3 ,p17_3 ,p18_3 ,p19_3 ,p20_3 ,p21_3 ,p22_3 ,p23_3 ,p24_3 ,p25_3 ,p26_3 ,p27_3 );} for (int count = 1; count < NILP + 1; count++) { if(cBoolModel[PIdx[count] +0*NSEG]){CuDerivModel_Ca_HVA(dt, v[count],state_macro(0,count) ,state_macro(1,count) ,param_macro(0, PIdx[count]) , ica[count] );}if(cBoolModel[PIdx[count] +1*NSEG]){CuDerivModel_Ca_LVAst(dt, v[count],state_macro(2,count) ,state_macro(3,count) ,param_macro(1, PIdx[count]) , ica[count] );}if(cBoolModel[PIdx[count] +2*NSEG]){CuDerivModel_CaDynamics_E2(dt, v[count],cai[count] ,param_macro(2, PIdx[count]) ,param_macro(3, PIdx[count]) ,param_macro(4, PIdx[count]) ,param_macro(5, PIdx[count]) , ica[count] ,eca[count] );}if(cBoolModel[PIdx[count] +3*NSEG]){CuDerivModel_Ih(dt, v[count],state_macro(5,count) ,param_macro(6, PIdx[count]) ,param_macro(7, PIdx[count]) );}if(cBoolModel[PIdx[count] +4*NSEG]){CuDerivModel_Im(dt, v[count],state_macro(6,count) ,param_macro(8, PIdx[count]) );}if(cBoolModel[PIdx[count] +5*NSEG]){CuDerivModel_K_Pst(dt, v[count],state_macro(7,count) ,state_macro(8,count) ,param_macro(9, PIdx[count]) );}if(cBoolModel[PIdx[count] +6*NSEG]){CuDerivModel_K_Tst(dt, v[count],state_macro(9,count) ,state_macro(10,count) ,param_macro(10, PIdx[count]) );}if(cBoolModel[PIdx[count] +7*NSEG]){CuDerivModel_Nap_Et2(dt, v[count],state_macro(11,count) ,state_macro(12,count) ,param_macro(11, PIdx[count]) );}if(cBoolModel[PIdx[count] +8*NSEG]){CuDerivModel_NaTa_t(dt, v[count],state_macro(13,count) ,state_macro(14,count) ,param_macro(12, PIdx[count]) );}if(cBoolModel[PIdx[count] +9*NSEG]){CuDerivModel_NaTs2_t(dt, v[count],state_macro(15,count) ,state_macro(16,count) ,param_macro(13, PIdx[count]) );}if(cBoolModel[PIdx[count] +10*NSEG]){}if(cBoolModel[PIdx[count] +11*NSEG]){CuDerivModel_SK_E2(dt, v[count],state_macro(17,count) ,param_macro(16, PIdx[count]) ,param_macro(17, PIdx[count]) , cai[count] ,eca[count] );}if(cBoolModel[PIdx[count] +12*NSEG]){CuDerivModel_SKv3_1(dt, v[count],state_macro(18,count) ,param_macro(18, PIdx[count]) );} } } //This one looks suspicious but leaving it and will check it later. 
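/* End of the time loop: flush the last WARPSIZE voltage samples still buffered in shared memory (SMemVHot) to VHotGlobal for every recording site. */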
for (int recInd = 0; recInd<sim.NRecSites; recInd++) { VHotGlobal[NeuronID*(sim.NRecSites*Nt*blockDim.y) + threadIdx.y*Nt*sim.NRecSites + recInd*Nt + Nt - WARPSIZE + PIdx[1]] = SMemVHot[WARPSIZE*recInd + PIdx[1]]; } } __global__ void NeuroGPUKernel(Stim stim, MYFTYPE* ParamsM, MYFTYPE* ModelStates, Sim sim, HMat InMat, MYFTYPE *V, MYFTYPE* VHotGlobal, MYDTYPE CompDepth, MYDTYPE CompFDepth) { MYFTYPE *amps, *SMemVHot; MYDTYPE offset = 0; runSimulation(InMat, ParamsM, ModelStates, V, stim, sim, VHotGlobal); } void ReadParamsMatX(const char* FN, MYFTYPE* ParamsM, MYDTYPE NParams, MYDTYPE Nx) { char FileName[300]; sprintf(FileName, "%sForC.mat", FN); FILE *fl = fopen(FileName, "rb"); // YYY add FILE* if (!fl) { printf("Failed to read TreeData.x\n"); return; } fread(ParamsM, sizeof(MYFTYPE), Nx*NParams, fl); fclose(fl); } void ReadParamsMat(const char* FN, MYFTYPE** ParamsM, MYDTYPE NParams, MYDTYPE Nx) { char FileName[300]; //sprintf(FileName,"%s%d.mat",FN,MUL32*32); sprintf(FileName, "%sForC.mat", FN); FILE *fl = fopen(FileName, "rb"); // YYY add FILE* if (!fl) { printf("Failed to read TreeData.x\n"); return; } for (int i = 0; i<NParams; i++) { ParamsM[i] = (MYFTYPE*)malloc(Nx * sizeof(MYFTYPE)); fread(ParamsM[i], sizeof(MYFTYPE), Nx, fl); } fclose(fl); } void initFrameWork(Stim stim, Sim sim, MYFTYPE* ParamsM, MYFTYPE* InitStatesM, HMat& InMat, MYDTYPE CompDepth, MYDTYPE CompFDepth, MYDTYPE NSets, HMat& Mat_d) { printf("in initframework\n"); cudaError_t cudaStatus; int i, j, t; // For matrix - MYFTYPE *PXOut_d, *PFOut_d; MYFTYPE *uHPOut_d, *bHPOut_d; Mat_d.N = InMat.N; Mat_d.NComps = InMat.NComps; Mat_d.Depth = InMat.Depth; Mat_d.NModels = InMat.NModels; Mat_d.LognDepth = InMat.LognDepth; Mat_d.nFathers = InMat.nFathers; Mat_d.nCallForFather = InMat.nCallForFather; Mat_d.nLRel = InMat.nLRel; Mat_d.nFLRel = InMat.nFLRel; // 32 data #ifdef BKSUB1 //cudaStatus = cudaMalloc((void**)&Mat_d.FIdxs, InMat.LognDepth*InMat.N* sizeof(MYDTYPE)); #endif #ifdef BKSUB2 cudaStatus = cudaMalloc((void**)&Mat_d.KsB, (InMat.N + 1) * sizeof(MYDTYPE)); #endif CUDA_RT_CALL(cudaMemcpyToSymbol(cE, InMat.e, InMat.N * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cF, InMat.f, InMat.N * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cKs, InMat.Ks, InMat.N * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cSegToComp, InMat.SegToComp, InMat.N * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cBoolModel, InMat.boolModel, InMat.N * InMat.NModels * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cCm, InMat.Cms, InMat.N * sizeof(MYFTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cSonNoVec, InMat.SonNoVec, InMat.N * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cRelStarts, InMat.RelStarts, InMat.nFathers * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cRelEnds, InMat.RelEnds, InMat.nFathers * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cRelVec, InMat.RelVec, InMat.nCallForFather * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMemcpyToSymbol(cSegStartI, InMat.SegStartI, (InMat.nCallForFather + 1) * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.SegEndI, InMat.SegEndI, (InMat.nCallForFather+1)* sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cSegEndI, InMat.SegEndI, (InMat.nCallForFather + 1) * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.Fathers, InMat.Fathers, InMat.nFathers * sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cFathers, InMat.Fathers, InMat.nFathers * sizeof(MYDTYPE))); // 32 data #ifdef BKSUB1 
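/* cFIdxs (the LognDepth*N index table) is staged into constant memory only when the BKSUB1 back-substitution variant is compiled in. */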
//CUDA_RT_CALL(cudaMemcpy(Mat_d.FIdxs, InMat.FIdxs, InMat.LognDepth*InMat.N* sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cFIdxs, InMat.FIdxs, InMat.LognDepth*InMat.N * sizeof(MYDTYPE))); #endif #ifdef BKSUB2 CUDA_RT_CALL(cudaMemcpy(Mat_d.KsB, InMat.KsB, (InMat.N + 1) * sizeof(MYDTYPE), cudaMemcpyHostToDevice); #endif //CUDA_RT_CALL(cudaMemcpy(Mat_d.CompByLevel32, InMat.CompByLevel32, (CompDepth)*WARPSIZE*sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cCompByLevel32, InMat.CompByLevel32, (CompDepth)*WARPSIZE * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.CompByFLevel32, InMat.CompByFLevel32, (CompFDepth)*WARPSIZE*sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cCompByFLevel32, InMat.CompByFLevel32, (CompFDepth)*WARPSIZE * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.LRelStarts, InMat.LRelStarts,InMat.nLRel*sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cLRelStarts, InMat.LRelStarts, InMat.nLRel * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.LRelEnds, InMat.LRelEnds,InMat.nLRel*sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cLRelEnds, InMat.LRelEnds, InMat.nLRel * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.FLRelStarts, InMat.FLRelStarts,InMat.nFLRel*sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cFLRelStarts, InMat.FLRelStarts, InMat.nFLRel * sizeof(MYDTYPE))); //CUDA_RT_CALL(cudaMemcpy(Mat_d.FLRelEnds, InMat.FLRelEnds,InMat.nFLRel*sizeof(MYDTYPE), cudaMemcpyHostToDevice); CUDA_RT_CALL(cudaMemcpyToSymbol(cFLRelEnds, InMat.FLRelEnds, InMat.nFLRel * sizeof(MYDTYPE))); CUDA_RT_CALL(cudaMalloc((void**)&PXOut_d, (InMat.N + 1) * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(cudaMalloc((void**)&PFOut_d, (InMat.N + 1) * sizeof(MYSECONDFTYPE))); CUDA_RT_CALL(cudaThreadSynchronize()); printf("done with all init framework\n"); } void callKernel(Stim stim, Sim sim, MYFTYPE* ParamsM, MYFTYPE* InitStatesM, HMat& Mat_d, MYFTYPE* V, MYDTYPE CompDepth, MYDTYPE CompFDepth, MYDTYPE prevRuns, MYDTYPE currKernelRun, MYFTYPE* VHotsHost) { MYDTYPE Nt = stim.Nt; MYFTYPE *d_modelParams, *d_modelStates; MYFTYPE *VHotsGlobal; MYFTYPE *V_d; CUDA_RT_CALL(cudaMalloc((void**)&VHotsGlobal, currKernelRun*sim.NRecSites*Nt *stim.NStimuli * sizeof(MYFTYPE))); int memSizeForVHotGlobal = Nt*stim.NStimuli*sim.NRecSites; MYDTYPE memSizeForModelParams = NPARAMS * Mat_d.NComps; MYDTYPE memSizeForInitStatae = NSTATES * Mat_d.NComps; CUDA_RT_CALL(cudaMalloc((void**)&V_d, Mat_d.N * sizeof(MYFTYPE))); CUDA_RT_CALL(cudaMemcpy(V_d, V, Mat_d.N * sizeof(MYFTYPE), cudaMemcpyHostToDevice)); Stim stim_d; stim_d.NStimuli = stim.NStimuli; stim_d.comp = stim.comp; stim_d.area = stim.area; stim_d.loc = stim.loc; stim_d.Nt = stim.Nt; Sim sim_d; sim_d.Celsius = sim.Celsius; sim_d.dt = sim.dt; sim_d.NRecSites = sim.NRecSites; sim_d.TFinal = sim.TFinal; #ifdef STIMFROMCSV printf("in mallocing loop\n******\n"); CUDA_RT_CALL(cudaMalloc((void**)&stim_d.durs, stim_d.Nt * sizeof(MYFTYPE))); CUDA_RT_CALL(cudaMalloc((void**)&stim_d.amps, stim_d.Nt*stim.NStimuli * sizeof(MYFTYPE))); #endif // STIMFROMFILE CUDA_RT_CALL(cudaMalloc((void**)&sim_d.RecSites, sim_d.NRecSites * sizeof(MYDTYPE))); #ifdef STIMFROMCSV CUDA_RT_CALL(cudaMemcpy(stim_d.durs, stim.durs, stim_d.Nt * sizeof(MYFTYPE), cudaMemcpyHostToDevice)); CUDA_RT_CALL(cudaMemcpy(stim_d.amps, stim.amps, stim_d.Nt*stim.NStimuli * sizeof(MYFTYPE), cudaMemcpyHostToDevice)); #endif // stimf CUDA_RT_CALL(cudaMemcpy(sim_d.RecSites, 
sim.RecSites, sim_d.NRecSites * sizeof(MYDTYPE), cudaMemcpyHostToDevice)); #ifdef NKIN_STATES MYFTYPE *d_initStates; CUDA_RT_CALL(cudaMalloc((void**)&d_initStates, NSTATES * InMat.NComps *NSets * sizeof(MYFTYPE)); CUDA_RT_CALL(cudaMemcpy(d_initStates, InitStatesM, NSTATES * InMat.NComps * NSets * sizeof(MYFTYPE), cudaMemcpyHostToDevice); #endif CUDA_RT_CALL(cudaMalloc((void**)&d_modelParams, NPARAMS * Mat_d.NComps *currKernelRun * sizeof(MYFTYPE))); CUDA_RT_CALL(cudaMemcpy(d_modelParams, &ParamsM[prevRuns*memSizeForModelParams], NPARAMS * Mat_d.NComps * currKernelRun * sizeof(MYFTYPE), cudaMemcpyHostToDevice)); CUDA_RT_CALL(cudaMalloc((void**)&d_modelStates, (NSTATES + 1) * (NSEG) * currKernelRun * sizeof(MYFTYPE))); dim3 blockDim(WARPSIZE, stim.NStimuli); dim3 gridDim(currKernelRun); #ifdef NKIN_STATES if (streamID == 0) { NeuroGPUKernel << <currKernelRun, blockDim, TotalSMem, stream0 >> > (stim_d, &d_modelParams[prevRuns*memSizeForModelParams], &d_initStates[prevRuns*memSizeForInitStatae], sim_d, Mat_d, V_d, &VHotsGlobal[prevRuns*memSizeForVHotGlobal], CompDepth, CompFDepth); // RRR CUDA_RT_CALL(cudaMemcpyAsync(&VHotsHost[prevRuns*memSizeForVHotGlobal], &VHotsGlobal[prevRuns*memSizeForVHotGlobal], currKernelRun * Nt * sim.NRecSites * stim.NStimuli * sizeof(MYFTYPE), cudaMemcpyDeviceToHost, stream0); printf("dev id is %d, cudastatus is %s\n", currDevice, cudaStatus); } #endif //#ifndef NKIN_STATES printf("kernel not ran yet\n"); NeuroGPUKernel << <currKernelRun, blockDim >> >(stim_d, d_modelParams, d_modelStates, sim_d, Mat_d, V_d, VHotsGlobal, CompDepth, CompFDepth); // RRR printf("kernel ran before memcpyasync currkernel run is %d\n", currKernelRun); CUDA_RT_CALL(cudaMemcpyAsync(VHotsHost, VHotsGlobal, currKernelRun * Nt * sim.NRecSites * stim.NStimuli * sizeof(MYFTYPE), cudaMemcpyDeviceToHost)); printf("done copying*&*&*&*&*&*&*\n"); } void stEfork2Main(Stim stim, Sim sim, MYFTYPE* ParamsM, MYFTYPE* InitStatesM, HMat& InMat, MYFTYPE* V, MYDTYPE CompDepth, MYDTYPE CompFDepth, int NSets, int* p2pCapableGPUs, int np2p) { MYFTYPE *Vhots; MYFTYPE **vhots_dev; MYDTYPE Nt = stim.Nt; printf("in stefork\n"); vhots_dev = (MYFTYPE**)(malloc(np2p * sizeof(MYFTYPE*))); Vhots = (MYFTYPE*)malloc(NSets*Nt*stim.NStimuli*sim.NRecSites * sizeof(MYFTYPE)); HMat Mat_d; if (np2p == 0) { np2p = 1; } for (int i = 0; i < np2p; i++) { printf("calling initframework p2pCapableGPUs[i] is %d\n", p2pCapableGPUs[i]); CUDA_RT_CALL(cudaSetDevice(p2pCapableGPUs[i])); // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); initFrameWork(stim, sim, ParamsM, InitStatesM, InMat, CompDepth, CompFDepth, NSets, Mat_d); } MYDTYPE prevRuns = 0; MYDTYPE currRun; if (NSets > np2p) { currRun = ceil(NSets / np2p); } else { currRun = NSets; }; printf("done initframework dev0 curr Kernel is %d\n", currRun); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for (int i = 0; i < np2p; i++) { if (prevRuns >= NSets) break; CUDA_RT_CALL(cudaSetDevice(p2pCapableGPUs[i])); printf("calling kernel dev%d\n", p2pCapableGPUs[i]); cudaMallocHost((void**)&vhots_dev[i], stim.NStimuli*Nt*sim.NRecSites*currRun * sizeof(MYFTYPE)); callKernel(stim, sim, ParamsM, InitStatesM, Mat_d, V, CompDepth, CompFDepth, prevRuns, currRun, vhots_dev[i]); prevRuns += currRun; } for (int i = 0; i < np2p; i++) { CUDA_RT_CALL(cudaSetDevice(p2pCapableGPUs[i])); CUDA_RT_CALL(cudaDeviceSynchronize()); printf("done synch%d\n", p2pCapableGPUs[i]); if (NSets <np2p) { printf("nsets >p2pdevs"); Vhots = vhots_dev[0]; } else 
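/* Otherwise gather the results: each device's pinned buffer is copied into its own contiguous slab of the host-side Vhots array, offset by currRun parameter sets per device. */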
{ memcpy(&Vhots[(currRun*i)*stim.NStimuli*Nt*sim.NRecSites], vhots_dev[i], stim.NStimuli*Nt*sim.NRecSites*currRun * sizeof(MYFTYPE)); } } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("it took %f ms\n", milliseconds); FILE *file = fopen(TIMES_FN, "w"); if (file) { fprintf(file, "%d,%f\n", NSets, milliseconds); } else { printf("ERR SaveArrayToFile %s\n", TIMES_FN); } fclose(file); SaveArrayToFile(VHOT_OUT_FN_P, NSets*Nt*stim.NStimuli*sim.NRecSites, Vhots); }
a05b92eb75b6d5bac81dcd848b423b0ec5a214be.hip
// !!! This is a file automatically generated by hipify!!! #define WIN32 #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = wA * BLOCK_SIZE * by; int aEnd = aBegin + wA - 1; int aStep = BLOCK_SIZE; int bBegin = BLOCK_SIZE * bx; int bStep = BLOCK_SIZE * wB; float Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; __syncthreads(); #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } __syncthreads(); } int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } else { hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, 
d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } else { hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB); correct = false; } } printf("%s\n", correct ? 
"OK" : "FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); hipDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipSetDevice(devID); } hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
a05b92eb75b6d5bac81dcd848b423b0ec5a214be.cu
#define WIN32 #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = wA * BLOCK_SIZE * by; int aEnd = aBegin + wA - 1; int aStep = BLOCK_SIZE; int bBegin = BLOCK_SIZE * bx; int bStep = BLOCK_SIZE * wB; float Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; __syncthreads(); #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } __syncthreads(); } int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use 
for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB); correct = false; } } printf("%s\n", correct ? 
"OK" : "FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); cudaDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaSetDevice(devID); } cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
77719f826d884f4b72d30987b4d47f2327590020.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper // #include <cutil.h> #include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0f /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { checkCudaErrors(hipFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_fc_momentum_x[1]; __constant__ float3 ff_fc_momentum_y[1]; __constant__ float3 ff_fc_momentum_z[1]; __constant__ float3 ff_fc_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); hipError_t error; allocateReadWriteSets(Dg, Db); hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); freeReadWriteSets(Dg, Db); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s initialize variables \n", hipGetErrorString(error)); exit(-1); } } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; 
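/* The remaining momentum-flux rows reuse the symmetric off-diagonal products computed above (e.g. fc_momentum_y.x equals fc_momentum_x.y); only the diagonal terms add the pressure. */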
fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { hipError_t error; dim3 Dg(nelr / block_length), Db(block_length); allocateReadWriteSets(Dg, Db); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); freeReadWriteSets(Dg, Db); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z, fc_i_density_energy); fc_momentum_x[i + 0*nelr] = fc_i_momentum_x.x; fc_momentum_x[i + 1*nelr] = fc_i_momentum_x.y; fc_momentum_x[i + 2*nelr] = fc_i_momentum_x.z; fc_momentum_y[i + 0*nelr] = fc_i_momentum_y.x; fc_momentum_y[i + 1*nelr] = fc_i_momentum_y.y; fc_momentum_y[i + 2*nelr] = fc_i_momentum_y.z; fc_momentum_z[i + 0*nelr] = fc_i_momentum_z.x; fc_momentum_z[i + 1*nelr] = fc_i_momentum_z.y; fc_momentum_z[i + 2*nelr] = fc_i_momentum_z.z; fc_density_energy[i + 0*nelr] = fc_i_density_energy.x; fc_density_energy[i + 1*nelr] = fc_i_density_energy.y; fc_density_energy[i + 2*nelr] = fc_i_density_energy.z; } void compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { dim3 Dg(nelr / block_length), Db(block_length); hipError_t error; allocateReadWriteSets(Dg,Db); hipLaunchKernelGGL(( cuda_compute_flux_contributions), dim3(Dg),dim3(Db), 0, 0, nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); freeReadWriteSets(Dg,Db); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contribution failed\n", hipGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + 
(VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; fc_i_momentum_x.x = fc_momentum_x[i + 0*nelr]; fc_i_momentum_x.y = fc_momentum_x[i + 1*nelr]; fc_i_momentum_x.z = fc_momentum_x[i + 2*nelr]; fc_i_momentum_y.x = fc_momentum_y[i + 0*nelr]; fc_i_momentum_y.y = fc_momentum_y[i + 1*nelr]; fc_i_momentum_y.z = fc_momentum_y[i + 2*nelr]; fc_i_momentum_z.x = fc_momentum_z[i + 0*nelr]; fc_i_momentum_z.y = fc_momentum_z[i + 1*nelr]; fc_i_momentum_z.z = fc_momentum_z[i + 2*nelr]; fc_i_density_energy.x = fc_density_energy[i + 0*nelr]; fc_i_density_energy.y = fc_density_energy[i + 1*nelr]; fc_i_density_energy.z = fc_density_energy[i + 2*nelr]; float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 fc_nb_momentum_x, fc_nb_momentum_y, fc_nb_momentum_z; float3 fc_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); fc_nb_momentum_x.x = fc_momentum_x[nb + 0*nelr]; fc_nb_momentum_x.y = fc_momentum_x[nb + 1*nelr]; fc_nb_momentum_x.z = fc_momentum_x[nb + 2*nelr]; fc_nb_momentum_y.x = fc_momentum_y[nb + 0*nelr]; fc_nb_momentum_y.y = fc_momentum_y[nb + 1*nelr]; fc_nb_momentum_y.z = fc_momentum_y[nb + 2*nelr]; fc_nb_momentum_z.x = fc_momentum_z[nb + 0*nelr]; fc_nb_momentum_z.y = fc_momentum_z[nb + 1*nelr]; fc_nb_momentum_z.z = fc_momentum_z[nb + 2*nelr]; fc_nb_density_energy.x = fc_density_energy[nb + 0*nelr]; fc_nb_density_energy.y = fc_density_energy[nb + 1*nelr]; fc_nb_density_energy.z = fc_density_energy[nb + 2*nelr]; // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += 
factor*(fc_nb_density_energy.x+fc_i_density_energy.x); flux_i_momentum.x += factor*(fc_nb_momentum_x.x+fc_i_momentum_x.x); flux_i_momentum.y += factor*(fc_nb_momentum_y.x+fc_i_momentum_y.x); flux_i_momentum.z += factor*(fc_nb_momentum_z.x+fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(fc_nb_density_energy.y+fc_i_density_energy.y); flux_i_momentum.x += factor*(fc_nb_momentum_x.y+fc_i_momentum_x.y); flux_i_momentum.y += factor*(fc_nb_momentum_y.y+fc_i_momentum_y.y); flux_i_momentum.z += factor*(fc_nb_momentum_z.y+fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(fc_nb_density_energy.z+fc_i_density_energy.z); flux_i_momentum.x += factor*(fc_nb_momentum_x.z+fc_i_momentum_x.z); flux_i_momentum.y += factor*(fc_nb_momentum_y.z+fc_i_momentum_y.z); flux_i_momentum.z += factor*(fc_nb_momentum_z.z+fc_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_fc_density_energy[0].x+fc_i_density_energy.x); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].x + fc_i_momentum_x.x); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].x + fc_i_momentum_y.x); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].x + fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_fc_density_energy[0].y+fc_i_density_energy.y); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].y + fc_i_momentum_x.y); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].y + fc_i_momentum_y.y); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].y + fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_fc_density_energy[0].z+fc_i_density_energy.z); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].z + fc_i_momentum_x.z); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].z + fc_i_momentum_y.z); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].z + fc_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { hipError_t error; dim3 Dg(nelr / block_length), Db(block_length); allocateReadWriteSets(Dg,Db); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); freeReadWriteSets(Dg,Db); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", hipGetErrorString(error)); exit(-1); } } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); 
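/* Sub-step update: the precomputed step factor is divided by (RK+1-j), so later sub-iterations of this RK-style scheme apply a progressively larger fraction of the local time step to every conserved variable. */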
float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { hipError_t error; dim3 Dg(nelr / block_length), Db(block_length); allocateReadWriteSets(Dg,Db); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); freeReadWriteSets(Dg,Db); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s update failed\n", hipGetErrorString(error)); exit(-1); } } /* * Main function */ int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; hipDeviceProp_t prop; int dev; checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipGetDevice(&dev)); checkCudaErrors(hipGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_fc_momentum_x; float3 h_ff_fc_momentum_y; float3 h_ff_fc_momentum_z; float3 h_ff_fc_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_fc_momentum_x, h_ff_fc_momentum_y, h_ff_fc_momentum_z, h_ff_fc_density_energy); // copy far field conditions to the gpu checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_x, &h_ff_fc_momentum_x, sizeof(float3)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_y, &h_ff_fc_momentum_y, sizeof(float3)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_z, &h_ff_fc_momentum_z, sizeof(float3)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_density_energy, &h_ff_fc_density_energy, sizeof(float3)) ); } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; 
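    // Layout note: the per-element device arrays below are stored structure-of-arrays with
    // stride nelr (nel rounded up to a whole number of block_length-sized blocks), so
    // component k of element i lives at index [i + k*nelr]. This is presumably done so that
    // consecutive threads in a block touch consecutive addresses (coalesced global accesses)
    // and so every launched thread maps to a valid, possibly padded, element.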
    float* normals;
    {
        std::ifstream file(data_file_name);

        file >> nel;
        nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length));

        float* h_areas = new float[nelr];
        int* h_elements_surrounding_elements = new int[nelr*NNB];
        float* h_normals = new float[nelr*NDIM*NNB];

        // read in data
        for(int i = 0; i < nel; i++)
        {
            file >> h_areas[i];
            for(int j = 0; j < NNB; j++)
            {
                file >> h_elements_surrounding_elements[i + j*nelr];
                if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
                h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
                for(int k = 0; k < NDIM; k++)
                {
                    file >> h_normals[i + (j + k*NNB)*nelr];
                    h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
                }
            }
        }

        // fill in remaining data
        int last = nel-1;
        for(int i = nel; i < nelr; i++)
        {
            h_areas[i] = h_areas[last];
            for(int j = 0; j < NNB; j++)
            {
                // duplicate the last element
                h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
                for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
            }
        }

        areas = alloc<float>(nelr);
        upload<float>(areas, h_areas, nelr);

        elements_surrounding_elements = alloc<int>(nelr*NNB);
        upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);

        normals = alloc<float>(nelr*NDIM*NNB);
        upload<float>(normals, h_normals, nelr*NDIM*NNB);

        delete[] h_areas;
        delete[] h_elements_surrounding_elements;
        delete[] h_normals;
    }

    // Create arrays and set initial conditions
    float* variables = alloc<float>(nelr*NVAR);
    initialize_variables(nelr, variables);

    float* old_variables = alloc<float>(nelr*NVAR);
    float* fluxes = alloc<float>(nelr*NVAR);
    float* step_factors = alloc<float>(nelr);
    float* fc_momentum_x = alloc<float>(nelr*NDIM);
    float* fc_momentum_y = alloc<float>(nelr*NDIM);
    float* fc_momentum_z = alloc<float>(nelr*NDIM);
    float* fc_density_energy = alloc<float>(nelr*NDIM);

    // make sure all memory is really allocated before we start timing
    initialize_variables(nelr, old_variables);
    initialize_variables(nelr, fluxes);
    hipMemset( (void*) step_factors, 0, sizeof(float)*nelr );

    // make sure CUDA isn't still doing something before we start timing
    hipDeviceSynchronize();

    // these need to be computed the first time in order to compute time step
    std::cout << "Starting..."
<< std::endl; StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); hipError_t error; // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error)); exit(-1); } for(int j = 0; j < RK; j++) { compute_flux_contributions(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contributions failed\n", hipGetErrorString(error)); exit(-1); } compute_flux(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", hipGetErrorString(error)); exit(-1); } time_step(j, nelr, old_variables, variables, step_factors, fluxes); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s time_step\n", hipGetErrorString(error)); exit(-1); } } } hipDeviceSynchronize(); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); dealloc<float>(fc_momentum_x); dealloc<float>(fc_momentum_y); dealloc<float>(fc_momentum_z); dealloc<float>(fc_density_energy); std::cout << "Done..." << std::endl; return 0; }
77719f826d884f4b72d30987b4d47f2327590020.cu
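// CUDA build of the euler3d CFD solver (the source that the HIP version above was generated from).
// The curd_lib_host.h include and the allocateReadWriteSets(...)/freeReadWriteSets(...) calls
// bracketing every kernel launch come from an external instrumentation library (an assumption
// based on the header name); they wrap the launches but are not part of the original
// AIAA-2009-4001 solver referenced in the copyright comment below.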
#include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> #include<curd_lib_host.h> // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper // #include <cutil.h> #include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0f /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { checkCudaErrors(cudaFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost)); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_fc_momentum_x[1]; __constant__ float3 ff_fc_momentum_y[1]; __constant__ float3 ff_fc_momentum_z[1]; __constant__ float3 ff_fc_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); cudaError_t error; allocateReadWriteSets(Dg, Db); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); freeReadWriteSets(Dg, Db); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s initialize variables \n", cudaGetErrorString(error)); exit(-1); } } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = 
velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { cudaError_t error; dim3 Dg(nelr / block_length), Db(block_length); allocateReadWriteSets(Dg, Db); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); freeReadWriteSets(Dg, Db); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z, fc_i_density_energy); fc_momentum_x[i + 0*nelr] = fc_i_momentum_x.x; fc_momentum_x[i + 1*nelr] = fc_i_momentum_x.y; fc_momentum_x[i + 2*nelr] = 
fc_i_momentum_x.z; fc_momentum_y[i + 0*nelr] = fc_i_momentum_y.x; fc_momentum_y[i + 1*nelr] = fc_i_momentum_y.y; fc_momentum_y[i + 2*nelr] = fc_i_momentum_y.z; fc_momentum_z[i + 0*nelr] = fc_i_momentum_z.x; fc_momentum_z[i + 1*nelr] = fc_i_momentum_z.y; fc_momentum_z[i + 2*nelr] = fc_i_momentum_z.z; fc_density_energy[i + 0*nelr] = fc_i_density_energy.x; fc_density_energy[i + 1*nelr] = fc_i_density_energy.y; fc_density_energy[i + 2*nelr] = fc_i_density_energy.z; } void compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { dim3 Dg(nelr / block_length), Db(block_length); cudaError_t error; allocateReadWriteSets(Dg,Db); cuda_compute_flux_contributions<<<Dg,Db>>>(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); freeReadWriteSets(Dg,Db); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contribution failed\n", cudaGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; fc_i_momentum_x.x = fc_momentum_x[i + 0*nelr]; fc_i_momentum_x.y = fc_momentum_x[i + 1*nelr]; fc_i_momentum_x.z = fc_momentum_x[i + 2*nelr]; fc_i_momentum_y.x = fc_momentum_y[i + 0*nelr]; fc_i_momentum_y.y = fc_momentum_y[i + 1*nelr]; fc_i_momentum_y.z = fc_momentum_y[i + 2*nelr]; fc_i_momentum_z.x = fc_momentum_z[i + 0*nelr]; fc_i_momentum_z.y = fc_momentum_z[i + 1*nelr]; fc_i_momentum_z.z = fc_momentum_z[i + 2*nelr]; fc_i_density_energy.x = fc_density_energy[i + 0*nelr]; fc_i_density_energy.y = fc_density_energy[i + 1*nelr]; fc_i_density_energy.z = fc_density_energy[i + 2*nelr]; float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 fc_nb_momentum_x, fc_nb_momentum_y, fc_nb_momentum_z; float3 fc_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; 
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); fc_nb_momentum_x.x = fc_momentum_x[nb + 0*nelr]; fc_nb_momentum_x.y = fc_momentum_x[nb + 1*nelr]; fc_nb_momentum_x.z = fc_momentum_x[nb + 2*nelr]; fc_nb_momentum_y.x = fc_momentum_y[nb + 0*nelr]; fc_nb_momentum_y.y = fc_momentum_y[nb + 1*nelr]; fc_nb_momentum_y.z = fc_momentum_y[nb + 2*nelr]; fc_nb_momentum_z.x = fc_momentum_z[nb + 0*nelr]; fc_nb_momentum_z.y = fc_momentum_z[nb + 1*nelr]; fc_nb_momentum_z.z = fc_momentum_z[nb + 2*nelr]; fc_nb_density_energy.x = fc_density_energy[nb + 0*nelr]; fc_nb_density_energy.y = fc_density_energy[nb + 1*nelr]; fc_nb_density_energy.z = fc_density_energy[nb + 2*nelr]; // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(fc_nb_density_energy.x+fc_i_density_energy.x); flux_i_momentum.x += factor*(fc_nb_momentum_x.x+fc_i_momentum_x.x); flux_i_momentum.y += factor*(fc_nb_momentum_y.x+fc_i_momentum_y.x); flux_i_momentum.z += factor*(fc_nb_momentum_z.x+fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(fc_nb_density_energy.y+fc_i_density_energy.y); flux_i_momentum.x += factor*(fc_nb_momentum_x.y+fc_i_momentum_x.y); flux_i_momentum.y += factor*(fc_nb_momentum_y.y+fc_i_momentum_y.y); flux_i_momentum.z += factor*(fc_nb_momentum_z.y+fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(fc_nb_density_energy.z+fc_i_density_energy.z); flux_i_momentum.x += factor*(fc_nb_momentum_x.z+fc_i_momentum_x.z); flux_i_momentum.y += factor*(fc_nb_momentum_y.z+fc_i_momentum_y.z); flux_i_momentum.z += factor*(fc_nb_momentum_z.z+fc_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_fc_density_energy[0].x+fc_i_density_energy.x); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].x + fc_i_momentum_x.x); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].x + fc_i_momentum_y.x); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].x + fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_fc_density_energy[0].y+fc_i_density_energy.y); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].y + fc_i_momentum_x.y); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].y + 
fc_i_momentum_y.y); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].y + fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_fc_density_energy[0].z+fc_i_density_energy.z); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].z + fc_i_momentum_x.z); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].z + fc_i_momentum_y.z); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].z + fc_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { cudaError_t error; dim3 Dg(nelr / block_length), Db(block_length); allocateReadWriteSets(Dg,Db); cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); freeReadWriteSets(Dg,Db); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", cudaGetErrorString(error)); exit(-1); } } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { cudaError_t error; dim3 Dg(nelr / block_length), Db(block_length); allocateReadWriteSets(Dg,Db); cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); freeReadWriteSets(Dg,Db); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s update failed\n", cudaGetErrorString(error)); exit(-1); } } /* * Main function */ int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; cudaDeviceProp prop; int dev; checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaGetDevice(&dev)); checkCudaErrors(cudaGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = 
ff_speed*float(cos((float)angle_of_attack));
        ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
        ff_velocity.z = 0.0f;

        h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
        h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
        h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;

        h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));

        float3 h_ff_momentum;
        h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
        h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
        h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);

        float3 h_ff_fc_momentum_x;
        float3 h_ff_fc_momentum_y;
        float3 h_ff_fc_momentum_z;
        float3 h_ff_fc_density_energy;
        compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_fc_momentum_x, h_ff_fc_momentum_y, h_ff_fc_momentum_z, h_ff_fc_density_energy);

        // copy far field conditions to the gpu
        checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_x, &h_ff_fc_momentum_x, sizeof(float3)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_y, &h_ff_fc_momentum_y, sizeof(float3)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_z, &h_ff_fc_momentum_z, sizeof(float3)) );
        checkCudaErrors( cudaMemcpyToSymbol(ff_fc_density_energy, &h_ff_fc_density_energy, sizeof(float3)) );
    }

    int nel;
    int nelr;

    // read in domain geometry
    float* areas;
    int* elements_surrounding_elements;
    float* normals;
    {
        std::ifstream file(data_file_name);

        file >> nel;
        nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length));

        float* h_areas = new float[nelr];
        int* h_elements_surrounding_elements = new int[nelr*NNB];
        float* h_normals = new float[nelr*NDIM*NNB];

        // read in data
        for(int i = 0; i < nel; i++)
        {
            file >> h_areas[i];
            for(int j = 0; j < NNB; j++)
            {
                file >> h_elements_surrounding_elements[i + j*nelr];
                if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
                h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
                for(int k = 0; k < NDIM; k++)
                {
                    file >> h_normals[i + (j + k*NNB)*nelr];
                    h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
                }
            }
        }

        // fill in remaining data
        int last = nel-1;
        for(int i = nel; i < nelr; i++)
        {
            h_areas[i] = h_areas[last];
            for(int j = 0; j < NNB; j++)
            {
                // duplicate the last element
                h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
                for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
            }
        }

        areas = alloc<float>(nelr);
        upload<float>(areas, h_areas, nelr);

        elements_surrounding_elements = alloc<int>(nelr*NNB);
        upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);

        normals = alloc<float>(nelr*NDIM*NNB);
        upload<float>(normals, h_normals, nelr*NDIM*NNB);

        delete[] h_areas;
        delete[] h_elements_surrounding_elements;
        delete[] h_normals;
    }

    // Create arrays and set initial conditions
    float* variables = alloc<float>(nelr*NVAR);
    initialize_variables(nelr, variables);

    float* old_variables = alloc<float>(nelr*NVAR);
    float* fluxes = alloc<float>(nelr*NVAR);
    float* step_factors = alloc<float>(nelr);
    float* fc_momentum_x = alloc<float>(nelr*NDIM);
    float* fc_momentum_y = alloc<float>(nelr*NDIM);
    float* fc_momentum_z = alloc<float>(nelr*NDIM);
    float* fc_density_energy =
alloc<float>(nelr*NDIM); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing cudaThreadSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." << std::endl; StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); cudaError_t error; // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error)); exit(-1); } for(int j = 0; j < RK; j++) { compute_flux_contributions(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contributions failed\n", cudaGetErrorString(error)); exit(-1); } compute_flux(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", cudaGetErrorString(error)); exit(-1); } time_step(j, nelr, old_variables, variables, step_factors, fluxes); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s time_step\n", cudaGetErrorString(error)); exit(-1); } } } cudaThreadSynchronize(); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); dealloc<float>(fc_momentum_x); dealloc<float>(fc_momentum_y); dealloc<float>(fc_momentum_z); dealloc<float>(fc_density_energy); std::cout << "Done..." << std::endl; return 0; }