Dataset columns (string lengths, min .. max):
  hip_filename    5 .. 84
  hip_content     79 .. 9.69M
  cuda_filename   4 .. 83
  cuda_content    19 .. 9.69M
ab2cad03984849551f51a5f711a291f7e4806272.hip
// !!! This is a file automatically generated by hipify!!!
// This example demonstrates the use of shared per-block arrays to
// implement an optimized dense matrix multiplication algorithm.
// Like the shared_variables.cu example, a per-block __shared__
// array acts as a "bandwidth multiplier" by eliminating redundant
// loads issued by neighboring threads.

#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#define TILE_WIDTH 16

// a simple version of matrix_multiply which issues redundant loads from off-chip global memory
__global__ void matrix_multiply_simple(int *a, int *b, int *ab, size_t width)
{
  // calculate the row & column index of the element
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;

  if (row < width && col < width)
  {
    // do dot product between row of a and column of b
    int result = 0;
    for (size_t k = 0; k < width; ++k)
      result += a[row * width + k] * b[k * width + col];

    // write out this thread's result
    ab[row * width + col] = result;
  }
}

void MatrixMulOnHost(int* M, int* N, int* P, int Width)
{
  for (int i = 0; i < Width; ++i)
  {
    for (int j = 0; j < Width; ++j)
    {
      int sum = 0;
      for (int k = 0; k < Width; ++k)
      {
        int a = M[i * Width + k];
        int b = N[k * Width + j];
        sum += a * b;
      }
      P[i * Width + j] = sum;
    }
  }
}

int main(void)
{
  // create a large workload so we can easily measure the
  // performance difference of both implementations
  // note that n measures the width of the matrix, not the number of total elements
  //const size_t n = 1<<10;
  const size_t n = 1024;
  std::cout << "Matrix width is " << n << "\n";

  const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
  const dim3 num_blocks(n / block_size.x, n / block_size.y);

  // generate random input on the host
  // (small values keep the integer dot products well within range)
  std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n);
  for (size_t i = 0; i < n*n; ++i)
  {
    h_a[i] = rand() % 10;
    h_b[i] = rand() % 10;
  }

  // allocate storage for the device
  int *d_a = 0, *d_b = 0, *d_c = 0;
  hipMalloc((void**)&d_a, sizeof(int) * n * n);
  hipMalloc((void**)&d_b, sizeof(int) * n * n);
  hipMalloc((void**)&d_c, sizeof(int) * n * n);

  // copy input to the device
  hipMemcpy(d_a, &h_a[0], sizeof(int) * n * n, hipMemcpyHostToDevice);
  hipMemcpy(d_b, &h_b[0], sizeof(int) * n * n, hipMemcpyHostToDevice);

  // to get accurate timings, launch a single "warm-up" kernel
  hipLaunchKernelGGL(matrix_multiply_simple, dim3(num_blocks), dim3(block_size), 0, 0, d_a, d_b, d_c, n);

  hipMemcpy(&h_c[0], d_c, sizeof(int) * n * n, hipMemcpyDeviceToHost);

  //------------------
  int* h_r = (int*)malloc(sizeof(int) * n * n);
  MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n);

  for (size_t i = 0; i < n*n; i++)
  {
    if (h_r[i] != h_c[i])
    {
      std::cout << "Failed at i " << i << " h_r=" << h_r[i] << ", h_c=" << h_c[i] << "\n";
      exit(1);
    }
  }
  std::cout << "Result is correct.\n";

  // deallocate memory
  free(h_r);
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);

  return 0;
}
ab2cad03984849551f51a5f711a291f7e4806272.cu
// This example demonstrates the use of shared per-block arrays to
// implement an optimized dense matrix multiplication algorithm.
// Like the shared_variables.cu example, a per-block __shared__
// array acts as a "bandwidth multiplier" by eliminating redundant
// loads issued by neighboring threads.

#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define TILE_WIDTH 16

// a simple version of matrix_multiply which issues redundant loads from off-chip global memory
__global__ void matrix_multiply_simple(int *a, int *b, int *ab, size_t width)
{
  // calculate the row & column index of the element
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;

  if (row < width && col < width)
  {
    // do dot product between row of a and column of b
    int result = 0;
    for (size_t k = 0; k < width; ++k)
      result += a[row * width + k] * b[k * width + col];

    // write out this thread's result
    ab[row * width + col] = result;
  }
}

void MatrixMulOnHost(int* M, int* N, int* P, int Width)
{
  for (int i = 0; i < Width; ++i)
  {
    for (int j = 0; j < Width; ++j)
    {
      int sum = 0;
      for (int k = 0; k < Width; ++k)
      {
        int a = M[i * Width + k];
        int b = N[k * Width + j];
        sum += a * b;
      }
      P[i * Width + j] = sum;
    }
  }
}

int main(void)
{
  // create a large workload so we can easily measure the
  // performance difference of both implementations
  // note that n measures the width of the matrix, not the number of total elements
  //const size_t n = 1<<10;
  const size_t n = 1024;
  std::cout << "Matrix width is " << n << "\n";

  const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
  const dim3 num_blocks(n / block_size.x, n / block_size.y);

  // generate random input on the host
  // (small values keep the integer dot products well within range)
  std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n);
  for (size_t i = 0; i < n*n; ++i)
  {
    h_a[i] = rand() % 10;
    h_b[i] = rand() % 10;
  }

  // allocate storage for the device
  int *d_a = 0, *d_b = 0, *d_c = 0;
  cudaMalloc((void**)&d_a, sizeof(int) * n * n);
  cudaMalloc((void**)&d_b, sizeof(int) * n * n);
  cudaMalloc((void**)&d_c, sizeof(int) * n * n);

  // copy input to the device
  cudaMemcpy(d_a, &h_a[0], sizeof(int) * n * n, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, &h_b[0], sizeof(int) * n * n, cudaMemcpyHostToDevice);

  // to get accurate timings, launch a single "warm-up" kernel
  matrix_multiply_simple<<<num_blocks,block_size>>>(d_a, d_b, d_c, n);

  cudaMemcpy(&h_c[0], d_c, sizeof(int) * n * n, cudaMemcpyDeviceToHost);

  //------------------
  int* h_r = (int*)malloc(sizeof(int) * n * n);
  MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n);

  for (size_t i = 0; i < n*n; i++)
  {
    if (h_r[i] != h_c[i])
    {
      std::cout << "Failed at i " << i << " h_r=" << h_r[i] << ", h_c=" << h_c[i] << "\n";
      exit(1);
    }
  }
  std::cout << "Result is correct.\n";

  // deallocate memory
  free(h_r);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  return 0;
}
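The header comment in this pair promises a shared-memory tiled multiply, yet only the simple kernel appears above. The sketch below is not part of the original files; the kernel name and the assumption that width is a multiple of TILE_WIDTH (true here, since width == 1024) are mine. It shows how such a tiled kernel is typically written in CUDA:

// Minimal sketch (assumption: width is a multiple of TILE_WIDTH and the grid covers the matrix).
// Each block stages TILE_WIDTH x TILE_WIDTH sub-tiles of a and b in __shared__ memory
// so neighboring threads reuse the same global loads.
#define TILE_WIDTH 16

__global__ void matrix_multiply_tiled(const int *a, const int *b, int *ab, size_t width)
{
  __shared__ int s_a[TILE_WIDTH][TILE_WIDTH];
  __shared__ int s_b[TILE_WIDTH][TILE_WIDTH];

  int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
  int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
  int result = 0;

  // walk over the tiles along the shared dimension
  for (size_t m = 0; m < width / TILE_WIDTH; ++m)
  {
    // each thread loads one element of each tile
    s_a[threadIdx.y][threadIdx.x] = a[row * width + (m * TILE_WIDTH + threadIdx.x)];
    s_b[threadIdx.y][threadIdx.x] = b[(m * TILE_WIDTH + threadIdx.y) * width + col];
    __syncthreads();  // both tiles must be fully loaded before use

    for (int k = 0; k < TILE_WIDTH; ++k)
      result += s_a[threadIdx.y][k] * s_b[k][threadIdx.x];
    __syncthreads();  // wait before the tiles are overwritten in the next iteration
  }

  ab[row * width + col] = result;
}

Compared with matrix_multiply_simple, each thread issues width / TILE_WIDTH global loads per input matrix instead of width, a TILE_WIDTH-fold reduction, which is the "bandwidth multiplier" effect the header comment refers to.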
9e8e053c9f5023d7fa24d9ec2dd3c65d345fe4cd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"

using namespace gpu;

// Warning: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host-side) .cpp file.
// "Device" (or anything else) is therefore appended so that the names differ!

/*----------------------------------------------------------------------*\
|*                             Declaration                              *|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*              Imported                *|
\*-------------------------------------*/

/*--------------------------------------*\
|*               Public                 *|
\*-------------------------------------*/

__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);

/*--------------------------------------*\
|*              Private                 *|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*                           Implementation                             *|
\*---------------------------------------------------------------------*/

/*--------------------------------------*\
|*               Public                 *|
\*-------------------------------------*/

__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
    RipplingMath ripplingMath = RipplingMath(w, h);

    const int TID = Indice2D::tid();
    const int NB_THREAD = Indice2D::nbThread();
    const int WH = w * h;

    int i; // in [0,h[
    int j; // in [0,w[

    int s = TID; // in [0,...
    while (s < WH)
    {
        IndiceTools::toIJ(s, w, &i, &j);                 // s[0,W*H[ --> i[0,H[ j[0,W[
        ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); // update ptrTabPixels[s]

        s += NB_THREAD;
    }
}

/*--------------------------------------*\
|*              Private                 *|
\*-------------------------------------*/

/*----------------------------------------------------------------------*\
|*                                End                                   *|
\*---------------------------------------------------------------------*/
9e8e053c9f5023d7fa24d9ec2dd3c65d345fe4cd.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "RipplingMath.h" using namespace gpu; // Attention : Choix du nom est impotant! // VagueDevice.cu et non Vague.cu // Dans ce dernier cas, probl�me de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host) // On a donc ajouter Device (ou n'importequoi) pour que les noms soient diff�rents! /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t) { RipplingMath ripplingMath = RipplingMath(w, h); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; int i; // in [0,h[ int j; // in [0,w[ int s = TID; // in [0,... while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); // s[0,W*H[ --> i[0,H[ j[0,W[ ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); // update ptrTabPixels[s] s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
d9818c6edd878b80c41b55b2ef89b1a801e7df90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dlrm_raw_utils.h" #include <cstring> using namespace DLRM_RAW; #include <iostream> void process_kaggle_dataset(const std::string& input_dir_path, const std::string& output_dir_path, const int num_numericals, const int num_categoricals) { int max_chunk_per_file = 10000; // loop count, in a signle binary data, store how many chunks bool process_output = true; bool write_out = true; // int32_t hash_bucket = 40000000; // mod-idx // int max_cat_fea_cardi = 40000000; // 40M // int avg_cat_fea_cardi = 1000000; // 1M // int min_cat_fea_cardi = 1000000; // 1M // std::vector<int32_t> hist_sizes = {max_cat_fea_cardi, avg_cat_fea_cardi, avg_cat_fea_cardi, avg_cat_fea_cardi, avg_cat_fea_cardi, // min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, max_cat_fea_cardi, // max_cat_fea_cardi, avg_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, avg_cat_fea_cardi, // min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, max_cat_fea_cardi, // max_cat_fea_cardi, max_cat_fea_cardi, avg_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, // min_cat_fea_cardi}; // mod-idx int min_cat_fea_cardi = 10000000; // 10M int32_t hash_bucket = min_cat_fea_cardi; std::vector<int32_t> hist_sizes = {hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket}; // mod-idx size_t pool_alloc_size = (size_t)4 * 1024 * 1024 * 1024; // 4 GB //std::vector<int> dev = {0}; rmm::mr::device_memory_resource *base_mr = new rmm::mr::cuda_memory_resource(); auto *p_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(base_mr, pool_alloc_size); rmm::mr::set_current_device_resource(p_mr); std::vector<std::string> column_dtypes; // dtypes of label, dense, categorical std::vector<std::string> column_names; // names of label, dense, categorical std::vector<std::string> cat_column_names; // names of categorical std::map<std::string, int32_t> column_name_to_col_idx; // <col-name, idx> std::unordered_map<std::string, map_type<key_type, value_type>*> categorical_col_hash_tables; // <name, <key, value>> // label column_dtypes.push_back("int32"); column_names.push_back("label"); column_name_to_col_idx.insert(std::make_pair("label", 0)); // dense-features for (int k = 1; k <= 13; k++) { column_dtypes.push_back("int32"); std::string name = "I" + std::to_string(k); column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, k)); } // categorical-features for (int k = 1; k <= num_categoricals; k++) { column_dtypes.push_back("str"); std::string name = "C" + std::to_string(k); column_names.push_back(name); 
cat_column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, k+num_numericals-1)); auto cuda_map_obj = map_type<key_type, value_type>::create(compute_hash_table_size(hist_sizes[k-1])).release();; categorical_col_hash_tables.insert(std::make_pair(name, cuda_map_obj)); } int current_device = 0; hipDeviceProp_t prop; CK_CUDA_THROW_(hipGetDeviceProperties(&prop, current_device)); size_t read_chunks = 128 * 1024 * 1024; // read 128MB at one time uint32_t *accum_location = nullptr; // slot-size CK_CUDA_THROW_(hipMalloc(&accum_location, 128)); // 128 Bytes = 32 * uint32_t CK_CUDA_THROW_(hipMemset(accum_location, 0, 128)); // uint32_t *culled_index_count = nullptr; // CK_CUDA_THROW_(hipMalloc(&culled_index_count, 128)); // 128 Bytes = 32 * uint32_t size_t total_file_bytes_read = 0; const auto time_map_start = std::chrono::high_resolution_clock::now(); // get file size, hard-coded filename std::string input_file_name = std::string(input_dir_path + "/train.txt"); std::ifstream binary_reader(input_file_name, std::ios::binary); binary_reader.seekg(0, std::ios::end); size_t file_size = binary_reader.tellg(); binary_reader.close(); // csv arguments, https://docs.rapids.ai/api/libcudf/stable/structcudf_1_1io_1_1read__csv__args.html cudf_io::csv_reader_options in_args = cudf_io::csv_reader_options::builder( cudf_io::source_info{input_file_name}).header(-1); in_args.set_dtypes(column_dtypes); in_args.set_names(column_names); in_args.set_delimiter('\t'); in_args.set_byte_range_size(read_chunks); // how many bytes to read at one time. in_args.set_skipfooter(0); in_args.set_skiprows(0); in_args.set_use_cols_names(cat_column_names); int32_t total_row_nums = 0; int loop_count = 0; while (true) { total_file_bytes_read += in_args.get_byte_range_size(); cudf_io::table_with_metadata tbl_w_metadata = cudf_io::read_csv(in_args, p_mr); total_row_nums += tbl_w_metadata.tbl->num_rows(); dim3 block(prop.maxThreadsPerBlock, 1, 1); dim3 grid((tbl_w_metadata.tbl->num_rows() - 1) / block.x + 1, 1, 1); // categorical-features for (unsigned int k = 0; k < cat_column_names.size(); ++k) { auto col = std::move(tbl_w_metadata.tbl->get_column(k)); if (col.type().id() == cudf::type_id::STRING) { auto str_col = cudf::strings_column_view(col.view()); int64_t num_strings = str_col.size(); char *char_array = const_cast<char*>(str_col.chars().data<char>()); int32_t *offsets = const_cast<int32_t*>(str_col.offsets().data<int32_t>()); hipLaunchKernelGGL(( build_categorical_index<key_type, value_type>), dim3(grid), dim3(block), 0, 0, char_array, offsets, num_strings, // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else if (col.type().id() == cudf::type_id::INT32) { key_type *data = const_cast<key_type*>(col.view().data<key_type>()); bitmask_type *in_mask = const_cast<bitmask_type*>(col.view().null_mask()); hipLaunchKernelGGL(( build_categorical_index_from_ints<key_type, value_type>), dim3(grid), dim3(block), 0, 0, data, in_mask, tbl_w_metadata.tbl->num_rows(), // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else { ERROR_MESSAGE_("col.type().id() != [STRING, INT32]"); } } size_t new_byte_range_offset = in_args.get_byte_range_offset() + read_chunks; in_args.set_byte_range_offset(new_byte_range_offset); if (in_args.get_byte_range_offset() >= file_size) break; if 
((in_args.get_byte_range_offset() + read_chunks) > file_size) { size_t new_byte_range_size = file_size - in_args.get_byte_range_offset(); in_args.set_byte_range_size(new_byte_range_size); } ++loop_count; if (loop_count == max_chunk_per_file) break; } MESSAGE_(input_file_name + "'s total rows number = " + std::to_string(total_row_nums)); // show: slot size array std::vector<uint32_t> host_sz_per_fea(num_categoricals); CK_CUDA_THROW_(hipMemcpy(host_sz_per_fea.data(), accum_location, num_categoricals * sizeof(uint32_t), hipMemcpyDeviceToHost)); MESSAGE_("Slot size array in " + input_file_name + ", missing value mapped to unused key: "); for (auto c : host_sz_per_fea) std::cout << (c) << ", "; std::cout << "\b\b" << std::endl; const auto time_map_stop = std::chrono::high_resolution_clock::now(); const auto time_map_build = std::chrono::duration_cast<std::chrono::milliseconds>(time_map_stop - time_map_start); MESSAGE_("Time used to build map: " + std::to_string(time_map_build.count()) + " milliseconds."); double read_bw = double(total_file_bytes_read) / (1024.0 * 1024.0 * 1024.0); read_bw = (read_bw / time_map_build.count()) * 1000.f; MESSAGE_("Total bytes read: " + std::to_string(total_file_bytes_read) + " Effective Read B/W: " + std::to_string(read_bw) + " GB/s."); // CK_CUDA_THROW_(hipFree(culled_index_count)); CK_CUDA_THROW_(hipFree(accum_location)); // starting to do the convertion if (process_output) { uint32_t *dev_slot_size_array = nullptr; size_t slot_size_array_size = num_categoricals * sizeof(uint32_t); CK_CUDA_THROW_(hipMalloc(&dev_slot_size_array, slot_size_array_size)); CK_CUDA_THROW_(hipMemcpy(dev_slot_size_array, host_sz_per_fea.data(), slot_size_array_size, hipMemcpyHostToDevice)); int32_t *dev_out_buffer = nullptr; int32_t *host_out_buffer = nullptr; size_t sz_output_buffer = 128 * 1024 * 1024; // 128 MB, = read_chunks CK_CUDA_THROW_(hipMalloc(&dev_out_buffer, sz_output_buffer)); CK_CUDA_THROW_(hipHostMalloc(&host_out_buffer, sz_output_buffer)); int64_t *dev_int_col_ptrs = nullptr; int64_t *dev_int_col_nullmask_ptrs = nullptr; int64_t *dev_cat_col_nullmask_ptrs = nullptr; int64_t *dev_categorical_col_hash_obj = nullptr; int64_t *dev_char_ptrs = nullptr; int64_t *dev_offset_ptrs = nullptr; size_t sz_dev_int_col = num_numericals * sizeof(int64_t); size_t sz_dev_cat_hash_obj = num_categoricals * sizeof(map_type<key_type, value_type>); size_t sz_dev_str_ptrs = num_categoricals * sizeof(int64_t); CK_CUDA_THROW_(hipMalloc(&dev_int_col_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(hipMalloc(&dev_int_col_nullmask_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(hipMalloc(&dev_cat_col_nullmask_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(hipMalloc(&dev_categorical_col_hash_obj, sz_dev_cat_hash_obj)); CK_CUDA_THROW_(hipMalloc(&dev_char_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(hipMalloc(&dev_offset_ptrs, sz_dev_str_ptrs)); // encode and write out binary int maxbytes = 96 * 1024; // dynamic shared memory size 96 KB hipFuncSetAttribute(process_data_rows<key_type, value_type>, hipFuncAttributeMaxDynamicSharedMemorySize, maxbytes); std::vector<map_type<key_type, value_type>> categorical_col_hash_obj; for (auto c : cat_column_names) { categorical_col_hash_obj.push_back(*categorical_col_hash_tables[c]); } CK_CUDA_THROW_(hipMemcpy((void*)dev_categorical_col_hash_obj, (void*)categorical_col_hash_obj.data(), sz_dev_cat_hash_obj, hipMemcpyHostToDevice)); if (process_output) { std::ofstream *binary_writer = nullptr; if (write_out) binary_writer = new std::ofstream(std::string(output_dir_path + "/train_data.bin"), 
std::ios::binary); size_t sz_total_output_binary = 0; const auto time_convert_start = std::chrono::high_resolution_clock::now(); // train_data.bin { int32_t rows_begin_train = 0, rows_end_train = 36672493; // train.txt [:36672493) std::string input_file_path = std::string(input_dir_path + "/train.txt"); sz_total_output_binary = convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer, dev_slot_size_array, rows_begin_train, rows_end_train, 3); MESSAGE_("Porcessed file: " + input_file_path + " for /train_data.bin"); MESSAGE_("Size of train_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer) binary_writer->close(); } // validation-data and testing-data { int32_t rows_begin_val = 36672493, rows_end_val = 41256555; // train.txt [36672493, 41256555) int32_t rows_begin_test = 41256555, rows_end_test = 45840617; // train.txt [41256555, 45840617] std::string input_file_path = std::string(input_dir_path + "/train.txt"); // val std::ofstream *binary_writer_val = nullptr; if (write_out) binary_writer_val = new std::ofstream(std::string(output_dir_path + "/val_data.bin"), std::ios::binary); sz_total_output_binary = convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer_val, dev_slot_size_array, rows_begin_val, rows_end_val, 3); MESSAGE_("Size of val_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer_val) binary_writer_val->close(); // test std::ofstream *binary_writer_test = nullptr; if (write_out) binary_writer_test = new std::ofstream(std::string(output_dir_path + "/test_data.bin"), std::ios::binary); sz_total_output_binary = convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer_test, dev_slot_size_array, rows_begin_test, rows_end_test, 3); MESSAGE_("Size of test_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer_test) binary_writer_test->close(); MESSAGE_("Processed file: " + input_file_path + " for val_data.bin and test_data.bin"); } const auto time_convert_stop = std::chrono::high_resolution_clock::now(); const auto time_convert_total = std::chrono::duration_cast<std::chrono::milliseconds>(time_convert_stop - time_convert_start); MESSAGE_("Time to process binaries: " + std::to_string(time_convert_total.count()) + " milliseconds."); double p_read_bw = (double)process_read_bytes / (1024.0 * 1024.0 * 1024.0); p_read_bw = (p_read_bw / time_convert_total.count()) * 1000.f; double p_write_bw = (double)process_write_bytes / (1024.0 * 1024.0 * 1024.0); p_write_bw = (p_write_bw / time_convert_total.count()) * 1000.f; size_t total_second_pass_bytes = process_read_bytes + process_write_bytes; double p_2nd_bw = (double)total_second_pass_bytes / (1024.0 * 1024.0 * 1024.0); p_2nd_bw = (p_2nd_bw / time_convert_total.count()) * 
1000.f; MESSAGE_("Convert Bytes reading: " + std::to_string(process_read_bytes) + ", Effective reading B/W: " + std::to_string(p_read_bw) + " GB/s."); MESSAGE_("Convert Bytes writing: " + std::to_string(process_write_bytes) + ", Effective reading B/W: " + std::to_string(p_write_bw) + " GB/s."); MESSAGE_("Convert Bytes total: " + std::to_string(total_second_pass_bytes) + ", Effective reading B/W: " + std::to_string(p_2nd_bw) + " GB/s."); } const auto program_end_time = std::chrono::high_resolution_clock::now(); const auto application_time = std::chrono::duration_cast<std::chrono::milliseconds>(program_end_time - time_map_start); double app_bw = (double)total_file_bytes_read / (1024.0 * 1024.0 * 1024.0); app_bw = (app_bw / application_time.count()) * 1000.f; MESSAGE_("Application process B/W: " + std::to_string(app_bw) + " GB/s."); CK_CUDA_THROW_(hipFree(dev_out_buffer)); CK_CUDA_THROW_(hipHostFree(host_out_buffer)); CK_CUDA_THROW_(hipFree(dev_int_col_ptrs)); CK_CUDA_THROW_(hipFree(dev_int_col_nullmask_ptrs)); CK_CUDA_THROW_(hipFree(dev_categorical_col_hash_obj)); CK_CUDA_THROW_(hipFree(dev_char_ptrs)); CK_CUDA_THROW_(hipFree(dev_offset_ptrs)); CK_CUDA_THROW_(hipFree(dev_slot_size_array)); CK_CUDA_THROW_(hipFree(dev_cat_col_nullmask_ptrs)); } // destory map objects for (auto c : categorical_col_hash_tables) c.second->destroy(); delete p_mr; p_mr = nullptr; } void process_terabyte_dataset(const std::string& input_dir_path, const std::string& output_dir_path, const int num_numericals, const int num_categoricals, const std::vector<std::string>& train_days, const std::vector<std::string>& test_days) { int max_chunk_per_file = 10000; // loop count, in a signle binary data, store how many chunks bool process_output = true; bool write_out = true; int min_cat_fea_cardi = 40000000; // 40M int32_t hash_bucket = min_cat_fea_cardi; std::vector<int32_t> hist_sizes = {hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket}; // mod-idx size_t pool_alloc_size = (size_t)10 * 1024 * 1024 * 1024; // 10 GB rmm::mr::device_memory_resource *base_mr = new rmm::mr::cuda_memory_resource(); auto *p_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(base_mr, pool_alloc_size); rmm::mr::set_current_device_resource(p_mr); std::vector<std::string> column_dtypes; // dtypes of label, dense, categorical std::vector<std::string> column_names; // names of label, dense, categorical std::vector<std::string> cat_column_names; // names of categorical std::map<std::string, int32_t> column_name_to_col_idx; // <col-name, idx> std::unordered_map<std::string, map_type<key_type, value_type>*> categorical_col_hash_tables; // <name, <key, value>> // label column_dtypes.push_back("int32"); column_names.push_back("label"); column_name_to_col_idx.insert(std::make_pair("label", 0)); // dense-features for (int k = 1; k <= 13; k++) { column_dtypes.push_back("int32"); std::string name = "I" + std::to_string(k); column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, k)); } // categorical-features for (int k = 1; k <= num_categoricals; k++) { column_dtypes.push_back("str"); std::string name = "C" + std::to_string(k); column_names.push_back(name); cat_column_names.push_back(name); 
column_name_to_col_idx.insert(std::make_pair(name, k+num_numericals-1)); auto cuda_map_obj = map_type<key_type, value_type>::create(compute_hash_table_size(hist_sizes[k-1])).release();; categorical_col_hash_tables.insert(std::make_pair(name, cuda_map_obj)); } int current_device = 0; hipDeviceProp_t prop; CK_CUDA_THROW_(hipGetDeviceProperties(&prop, current_device)); size_t read_chunks = 128 * 1024 * 1024; // read 128MB at one time uint32_t *accum_location = nullptr; // slot-size CK_CUDA_THROW_(hipMalloc(&accum_location, 128)); // 128 Bytes = 32 * uint32_t CK_CUDA_THROW_(hipMemset(accum_location, 0, 128)); // uint32_t *culled_index_count = nullptr; // CK_CUDA_THROW_(hipMalloc(&culled_index_count, 128)); // 128 Bytes = 32 * uint32_t size_t total_file_bytes_read = 0; const auto time_map_start = std::chrono::high_resolution_clock::now(); // iteration on each day's data, including training and testing. std::vector<std::string> all_days; all_days.insert(all_days.end(), train_days.begin(), train_days.end()); all_days.insert(all_days.end(), test_days.begin(), test_days.end()); std::vector<size_t> sample_nums; for (const auto& day : all_days) { // get file size std::string input_file_name = input_dir_path + "/day_" + day; std::ifstream binary_reader(input_file_name, std::ios::binary); binary_reader.seekg(0, std::ios::end); size_t file_size = binary_reader.tellg(); binary_reader.close(); // csv arguments, https://docs.rapids.ai/api/libcudf/stable/structcudf_1_1io_1_1read__csv__args.html cudf_io::csv_reader_options in_args = cudf_io::csv_reader_options::builder( cudf_io::source_info{input_file_name}).header(-1); in_args.set_dtypes(column_dtypes); in_args.set_names(column_names); in_args.set_delimiter('\t'); in_args.set_byte_range_size(read_chunks); // how many bytes to read at one time. 
in_args.set_skipfooter(0); in_args.set_skiprows(0); in_args.set_use_cols_names(cat_column_names); int32_t total_row_nums = 0; int loop_count = 0; while (true) { total_file_bytes_read += in_args.get_byte_range_size(); cudf_io::table_with_metadata tbl_w_metadata = cudf_io::read_csv(in_args, p_mr); total_row_nums += tbl_w_metadata.tbl->num_rows(); dim3 block(prop.maxThreadsPerBlock, 1, 1); dim3 grid((tbl_w_metadata.tbl->num_rows() - 1) / block.x + 1, 1, 1); // categorical-features for (unsigned int k = 0; k < cat_column_names.size(); ++k) { auto col = std::move(tbl_w_metadata.tbl->get_column(k)); if (col.type().id() == cudf::type_id::STRING) { auto str_col = cudf::strings_column_view(col.view()); int64_t num_strings = str_col.size(); char *char_array = const_cast<char*>(str_col.chars().data<char>()); int32_t *offsets = const_cast<int32_t*>(str_col.offsets().data<int32_t>()); hipLaunchKernelGGL(( build_categorical_index<key_type, value_type>), dim3(grid), dim3(block), 0, 0, char_array, offsets, num_strings, // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else if (col.type().id() == cudf::type_id::INT32) { key_type *data = const_cast<key_type*>(col.view().data<key_type>()); bitmask_type *in_mask = const_cast<bitmask_type*>(col.view().null_mask()); hipLaunchKernelGGL(( build_categorical_index_from_ints<key_type, value_type>), dim3(grid), dim3(block), 0, 0, data, in_mask, tbl_w_metadata.tbl->num_rows(), // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else { ERROR_MESSAGE_("col.type().id() != [STRING, INT32]"); } } size_t new_byte_range_offset = in_args.get_byte_range_offset() + read_chunks; in_args.set_byte_range_offset(new_byte_range_offset); if (in_args.get_byte_range_offset() >= file_size) break; if ((in_args.get_byte_range_offset() + read_chunks) > file_size) { size_t new_byte_range_size = file_size - in_args.get_byte_range_offset(); in_args.set_byte_range_size(new_byte_range_size); } ++loop_count; if (loop_count == max_chunk_per_file) break; } MESSAGE_(input_file_name + "'s total rows number = " + std::to_string(total_row_nums)); sample_nums.push_back(total_row_nums); } // end for all_days // show: slot size array std::vector<uint32_t> host_sz_per_fea(num_categoricals); CK_CUDA_THROW_(hipMemcpy(host_sz_per_fea.data(), accum_location, num_categoricals * sizeof(uint32_t), hipMemcpyDeviceToHost)); MESSAGE_("Slot size array, missing value mapped to unused key: "); for (auto c : host_sz_per_fea) std::cout << (c) << ", "; std::cout << "\b\b" << std::endl; const auto time_map_stop = std::chrono::high_resolution_clock::now(); const auto time_map_build = std::chrono::duration_cast<std::chrono::milliseconds>(time_map_stop - time_map_start); MESSAGE_("Time used to build map: " + std::to_string(time_map_build.count()) + " milliseconds."); double read_bw = double(total_file_bytes_read) / (1024.0 * 1024.0 * 1024.0); read_bw = (read_bw / time_map_build.count()) * 1000.f; MESSAGE_("Total bytes read: " + std::to_string(total_file_bytes_read) + " Effective Read B/W: " + std::to_string(read_bw) + " GB/s."); // CK_CUDA_THROW_(hipFree(culled_index_count)); CK_CUDA_THROW_(hipFree(accum_location)); // starting to do the convertion if (process_output) { uint32_t *dev_slot_size_array = nullptr; size_t slot_size_array_size = num_categoricals * sizeof(uint32_t); 
CK_CUDA_THROW_(hipMalloc(&dev_slot_size_array, slot_size_array_size)); CK_CUDA_THROW_(hipMemcpy(dev_slot_size_array, host_sz_per_fea.data(), slot_size_array_size, hipMemcpyHostToDevice)); int32_t *dev_out_buffer = nullptr; int32_t *host_out_buffer = nullptr; size_t sz_output_buffer = 128 * 1024 * 1024; // 128 MB, = read_chunks CK_CUDA_THROW_(hipMalloc(&dev_out_buffer, sz_output_buffer)); CK_CUDA_THROW_(hipHostMalloc(&host_out_buffer, sz_output_buffer)); int64_t *dev_int_col_ptrs = nullptr; int64_t *dev_int_col_nullmask_ptrs = nullptr; int64_t *dev_cat_col_nullmask_ptrs = nullptr; int64_t *dev_categorical_col_hash_obj = nullptr; int64_t *dev_char_ptrs = nullptr; int64_t *dev_offset_ptrs = nullptr; size_t sz_dev_int_col = num_numericals * sizeof(int64_t); size_t sz_dev_cat_hash_obj = num_categoricals * sizeof(map_type<key_type, value_type>); size_t sz_dev_str_ptrs = num_categoricals * sizeof(int64_t); CK_CUDA_THROW_(hipMalloc(&dev_int_col_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(hipMalloc(&dev_int_col_nullmask_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(hipMalloc(&dev_cat_col_nullmask_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(hipMalloc(&dev_categorical_col_hash_obj, sz_dev_cat_hash_obj)); CK_CUDA_THROW_(hipMalloc(&dev_char_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(hipMalloc(&dev_offset_ptrs, sz_dev_str_ptrs)); // encode and write out binary int maxbytes = 96 * 1024; // dynamic shared memory size 96 KB hipFuncSetAttribute(process_data_rows<key_type, value_type>, hipFuncAttributeMaxDynamicSharedMemorySize, maxbytes); std::vector<map_type<key_type, value_type>> categorical_col_hash_obj; for (auto c : cat_column_names) { categorical_col_hash_obj.push_back(*categorical_col_hash_tables[c]); } CK_CUDA_THROW_(hipMemcpy((void*)dev_categorical_col_hash_obj, (void*)categorical_col_hash_obj.data(), sz_dev_cat_hash_obj, hipMemcpyHostToDevice)); if (process_output) { const auto time_convert_start = std::chrono::high_resolution_clock::now(); std::ofstream *binary_writer = nullptr; if (write_out) binary_writer = new std::ofstream(std::string(output_dir_path + "/train_data.bin"), std::ios::binary); size_t sz_total_output_binary = 0; // train_data.bin size_t saved_samples_num = 0; for (size_t i = 0; i < train_days.size(); i++) { const auto& day = train_days[i]; size_t needed_samples_num = 4195197692 - saved_samples_num; // total should be 4195197692 int32_t rows_begin_train = -1, rows_end_train = -1; // train.txt [:36672000) if (needed_samples_num < sample_nums[i]) rows_end_train = needed_samples_num; std::string input_file_path = input_dir_path + "/day_" + day; sz_total_output_binary += convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer, dev_slot_size_array, rows_begin_train, rows_end_train, 1); MESSAGE_("Porcessed file: " + input_file_path + " for /train_data.bin"); if (needed_samples_num < sample_nums[i]) { saved_samples_num += needed_samples_num; break; } else { saved_samples_num += sample_nums[i]; } } // end for train_days MESSAGE_("Size of train_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer) binary_writer->close(); // testing-data { // test_data.bin std::ofstream *binary_writer_test = nullptr; if (write_out) binary_writer_test = new std::ofstream(std::string(output_dir_path + "/test_data.bin"), 
std::ios::binary); sz_total_output_binary = 0; size_t saved_samples_num = 0; for (size_t i = 0; i < test_days.size(); ++i) { const auto& day = test_days[i]; size_t needed_samples_num = 89137319 - saved_samples_num; // total should be 89137319 int32_t rows_begin_test = -1, rows_end_test = -1; if (needed_samples_num < sample_nums[train_days.size() + i]) rows_end_test = needed_samples_num; // rows_begin_test = 89137318; rows_end_test = -1; // [89137318: ), second half std::string input_file_path = input_dir_path + "/day_" + day; sz_total_output_binary += convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer_test, dev_slot_size_array, rows_begin_test, rows_end_test, 1); MESSAGE_("Porcessed file: " + input_file_path + " for /test_data.bin"); if (needed_samples_num < sample_nums[train_days.size() + i]) { saved_samples_num += needed_samples_num; break; } else { saved_samples_num += sample_nums[train_days.size() + i]; } } // end for test_days MESSAGE_("Size of test_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer_test) binary_writer_test->close(); } const auto time_convert_stop = std::chrono::high_resolution_clock::now(); const auto time_convert_total = std::chrono::duration_cast<std::chrono::milliseconds>(time_convert_stop - time_convert_start); MESSAGE_("Time to process binaries: " + std::to_string(time_convert_total.count()) + " milliseconds."); double p_read_bw = (double)process_read_bytes / (1024.0 * 1024.0 * 1024.0); p_read_bw = (p_read_bw / time_convert_total.count()) * 1000.f; double p_write_bw = (double)process_write_bytes / (1024.0 * 1024.0 * 1024.0); p_write_bw = (p_write_bw / time_convert_total.count()) * 1000.f; size_t total_second_pass_bytes = process_read_bytes + process_write_bytes; double p_2nd_bw = (double)total_second_pass_bytes / (1024.0 * 1024.0 * 1024.0); p_2nd_bw = (p_2nd_bw / time_convert_total.count()) * 1000.f; MESSAGE_("Convert Bytes reading: " + std::to_string(process_read_bytes) + ", Effective reading B/W: " + std::to_string(p_read_bw) + " GB/s."); MESSAGE_("Convert Bytes writing: " + std::to_string(process_write_bytes) + ", Effective reading B/W: " + std::to_string(p_write_bw) + " GB/s."); MESSAGE_("Convert Bytes total: " + std::to_string(total_second_pass_bytes) + ", Effective reading B/W: " + std::to_string(p_2nd_bw) + " GB/s."); } const auto program_end_time = std::chrono::high_resolution_clock::now(); const auto application_time = std::chrono::duration_cast<std::chrono::milliseconds>(program_end_time - time_map_start); double app_bw = (double)total_file_bytes_read / (1024.0 * 1024.0 * 1024.0); app_bw = (app_bw / application_time.count()) * 1000.f; MESSAGE_("Application process B/W: " + std::to_string(app_bw) + " GB/s."); CK_CUDA_THROW_(hipFree(dev_out_buffer)); CK_CUDA_THROW_(hipHostFree(host_out_buffer)); CK_CUDA_THROW_(hipFree(dev_int_col_ptrs)); CK_CUDA_THROW_(hipFree(dev_int_col_nullmask_ptrs)); CK_CUDA_THROW_(hipFree(dev_categorical_col_hash_obj)); CK_CUDA_THROW_(hipFree(dev_char_ptrs)); CK_CUDA_THROW_(hipFree(dev_offset_ptrs)); CK_CUDA_THROW_(hipFree(dev_slot_size_array)); CK_CUDA_THROW_(hipFree(dev_cat_col_nullmask_ptrs)); } // destory map objects for (auto c : categorical_col_hash_tables) c.second->destroy(); delete p_mr; p_mr = nullptr; } int main(const int 
argc, const char *argv[]) { if (argc < 3) { MESSAGE_("Need min 2 args: input_dir output_dir"); MESSAGE_("Usage for Kaggle Datasets: ./dlrm_raw input_dir output_dir"); MESSAGE_("Usage for TeraBytes Datasets: ./dlrm_raw input_dir output_dir --train [days for training] --test [days for testing]" ", those days are seperated with comma, no whitespace."); return -1; } const int num_numericals = 14; // label + 13 int-dense-feature const int num_categoricals = 26; // 26 int-categorical-feature std::string input_dir_path(argv[1]); std::string output_dir_path(argv[2]); switch (argc) { case 3: { MESSAGE_("Processing Kaggle datasets"); MESSAGE_("input_dir: " + input_dir_path); MESSAGE_("output_dir: " + output_dir_path); process_kaggle_dataset(input_dir_path, output_dir_path, num_numericals, num_categoricals); break; } case 7: { if (argc == 7 && (std::strcmp(argv[3], "--train") != 0 || std::strcmp(argv[5], "--test") != 0)) { MESSAGE_("Usage for TeraBytes Datasets: ./dlrm_raw input_dir output_dir --train [days for training] --test [days for testing]" ", those days are seperated with comma, no whitespace."); MESSAGE_("For example: ./dlrm_raw ./ ./ --train 0,1,2,3,4 --test 5,6,7"); return -1; } const std::vector<std::string> train_days = split_string(std::string(argv[4]), ","); const std::vector<std::string> test_days = split_string(std::string(argv[6]), ","); MESSAGE_("Processing TeraBytes datasets."); MESSAGE_("input_dir: " + input_dir_path); MESSAGE_("output_dir: " + output_dir_path); MESSAGE_("days for training: " + std::string(argv[4])); MESSAGE_("days for testing: " + std::string(argv[6])); process_terabyte_dataset(input_dir_path, output_dir_path, num_numericals, num_categoricals, train_days, test_days); break; } default: { MESSAGE_("Usage for Kaggle Datasets: ./dlrm_raw input_dir output_dir"); MESSAGE_("Usage for TeraBytes Datasets: ./dlrm_raw input_dir output_dir --train [days for training] --test [days for testing]" ", those days are seperated with comma, no whitespace."); return -1; break; } } MESSAGE_("Done."); return 0; }
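One detail of the converter above worth calling out: before encoding, it raises the dynamic shared-memory limit of process_data_rows to 96 KB via hipFuncSetAttribute (cudaFuncSetAttribute in the .cu variant below), since kernels are otherwise capped at 48 KB of dynamic shared memory per block. The following is a minimal CUDA-side sketch of that opt-in pattern; encode_rows is a hypothetical stand-in for process_data_rows, not code from this file:

#include <cuda_runtime.h>

// Hypothetical stand-in kernel that stages data through dynamic shared memory.
__global__ void encode_rows(const int *in, int *out, int n)
{
    extern __shared__ int tile[];                        // sized at launch time
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n)
    {
        tile[threadIdx.x] = in[gid];                     // stage through shared memory
        out[gid] = tile[threadIdx.x];
    }
}

void launch_with_large_smem(const int *d_in, int *d_out, int n)
{
    // Opt the kernel into more than the default 48 KB of dynamic shared memory.
    int maxbytes = 96 * 1024;  // 96 KB, as in dlrm_raw
    cudaFuncSetAttribute(encode_rows, cudaFuncAttributeMaxDynamicSharedMemorySize, maxbytes);

    dim3 block(1024, 1, 1);
    dim3 grid((n - 1) / block.x + 1, 1, 1);
    encode_rows<<<grid, block, maxbytes>>>(d_in, d_out, n);  // launch with 96 KB requested
}

Without the cudaFuncSetAttribute call, a launch requesting 96 KB of dynamic shared memory would fail on devices that support it only through this explicit opt-in.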
d9818c6edd878b80c41b55b2ef89b1a801e7df90.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dlrm_raw_utils.h" #include <cstring> using namespace DLRM_RAW; #include <iostream> void process_kaggle_dataset(const std::string& input_dir_path, const std::string& output_dir_path, const int num_numericals, const int num_categoricals) { int max_chunk_per_file = 10000; // loop count, in a signle binary data, store how many chunks bool process_output = true; bool write_out = true; // int32_t hash_bucket = 40000000; // mod-idx // int max_cat_fea_cardi = 40000000; // 40M // int avg_cat_fea_cardi = 1000000; // 1M // int min_cat_fea_cardi = 1000000; // 1M // std::vector<int32_t> hist_sizes = {max_cat_fea_cardi, avg_cat_fea_cardi, avg_cat_fea_cardi, avg_cat_fea_cardi, avg_cat_fea_cardi, // min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, max_cat_fea_cardi, // max_cat_fea_cardi, avg_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, avg_cat_fea_cardi, // min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, max_cat_fea_cardi, // max_cat_fea_cardi, max_cat_fea_cardi, avg_cat_fea_cardi, min_cat_fea_cardi, min_cat_fea_cardi, // min_cat_fea_cardi}; // mod-idx int min_cat_fea_cardi = 10000000; // 10M int32_t hash_bucket = min_cat_fea_cardi; std::vector<int32_t> hist_sizes = {hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket}; // mod-idx size_t pool_alloc_size = (size_t)4 * 1024 * 1024 * 1024; // 4 GB //std::vector<int> dev = {0}; rmm::mr::device_memory_resource *base_mr = new rmm::mr::cuda_memory_resource(); auto *p_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(base_mr, pool_alloc_size); rmm::mr::set_current_device_resource(p_mr); std::vector<std::string> column_dtypes; // dtypes of label, dense, categorical std::vector<std::string> column_names; // names of label, dense, categorical std::vector<std::string> cat_column_names; // names of categorical std::map<std::string, int32_t> column_name_to_col_idx; // <col-name, idx> std::unordered_map<std::string, map_type<key_type, value_type>*> categorical_col_hash_tables; // <name, <key, value>> // label column_dtypes.push_back("int32"); column_names.push_back("label"); column_name_to_col_idx.insert(std::make_pair("label", 0)); // dense-features for (int k = 1; k <= 13; k++) { column_dtypes.push_back("int32"); std::string name = "I" + std::to_string(k); column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, k)); } // categorical-features for (int k = 1; k <= num_categoricals; k++) { column_dtypes.push_back("str"); std::string name = "C" + std::to_string(k); column_names.push_back(name); cat_column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, 
k+num_numericals-1)); auto cuda_map_obj = map_type<key_type, value_type>::create(compute_hash_table_size(hist_sizes[k-1])).release();; categorical_col_hash_tables.insert(std::make_pair(name, cuda_map_obj)); } int current_device = 0; cudaDeviceProp prop; CK_CUDA_THROW_(cudaGetDeviceProperties(&prop, current_device)); size_t read_chunks = 128 * 1024 * 1024; // read 128MB at one time uint32_t *accum_location = nullptr; // slot-size CK_CUDA_THROW_(cudaMalloc(&accum_location, 128)); // 128 Bytes = 32 * uint32_t CK_CUDA_THROW_(cudaMemset(accum_location, 0, 128)); // uint32_t *culled_index_count = nullptr; // CK_CUDA_THROW_(cudaMalloc(&culled_index_count, 128)); // 128 Bytes = 32 * uint32_t size_t total_file_bytes_read = 0; const auto time_map_start = std::chrono::high_resolution_clock::now(); // get file size, hard-coded filename std::string input_file_name = std::string(input_dir_path + "/train.txt"); std::ifstream binary_reader(input_file_name, std::ios::binary); binary_reader.seekg(0, std::ios::end); size_t file_size = binary_reader.tellg(); binary_reader.close(); // csv arguments, https://docs.rapids.ai/api/libcudf/stable/structcudf_1_1io_1_1read__csv__args.html cudf_io::csv_reader_options in_args = cudf_io::csv_reader_options::builder( cudf_io::source_info{input_file_name}).header(-1); in_args.set_dtypes(column_dtypes); in_args.set_names(column_names); in_args.set_delimiter('\t'); in_args.set_byte_range_size(read_chunks); // how many bytes to read at one time. in_args.set_skipfooter(0); in_args.set_skiprows(0); in_args.set_use_cols_names(cat_column_names); int32_t total_row_nums = 0; int loop_count = 0; while (true) { total_file_bytes_read += in_args.get_byte_range_size(); cudf_io::table_with_metadata tbl_w_metadata = cudf_io::read_csv(in_args, p_mr); total_row_nums += tbl_w_metadata.tbl->num_rows(); dim3 block(prop.maxThreadsPerBlock, 1, 1); dim3 grid((tbl_w_metadata.tbl->num_rows() - 1) / block.x + 1, 1, 1); // categorical-features for (unsigned int k = 0; k < cat_column_names.size(); ++k) { auto col = std::move(tbl_w_metadata.tbl->get_column(k)); if (col.type().id() == cudf::type_id::STRING) { auto str_col = cudf::strings_column_view(col.view()); int64_t num_strings = str_col.size(); char *char_array = const_cast<char*>(str_col.chars().data<char>()); int32_t *offsets = const_cast<int32_t*>(str_col.offsets().data<int32_t>()); build_categorical_index<key_type, value_type><<<grid, block>>>(char_array, offsets, num_strings, // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else if (col.type().id() == cudf::type_id::INT32) { key_type *data = const_cast<key_type*>(col.view().data<key_type>()); bitmask_type *in_mask = const_cast<bitmask_type*>(col.view().null_mask()); build_categorical_index_from_ints<key_type, value_type><<<grid, block>>>(data, in_mask, tbl_w_metadata.tbl->num_rows(), // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else { ERROR_MESSAGE_("col.type().id() != [STRING, INT32]"); } } size_t new_byte_range_offset = in_args.get_byte_range_offset() + read_chunks; in_args.set_byte_range_offset(new_byte_range_offset); if (in_args.get_byte_range_offset() >= file_size) break; if ((in_args.get_byte_range_offset() + read_chunks) > file_size) { size_t new_byte_range_size = file_size - in_args.get_byte_range_offset(); 
in_args.set_byte_range_size(new_byte_range_size); } ++loop_count; if (loop_count == max_chunk_per_file) break; } MESSAGE_(input_file_name + "'s total rows number = " + std::to_string(total_row_nums)); // show: slot size array std::vector<uint32_t> host_sz_per_fea(num_categoricals); CK_CUDA_THROW_(cudaMemcpy(host_sz_per_fea.data(), accum_location, num_categoricals * sizeof(uint32_t), cudaMemcpyDeviceToHost)); MESSAGE_("Slot size array in " + input_file_name + ", missing value mapped to unused key: "); for (auto c : host_sz_per_fea) std::cout << (c) << ", "; std::cout << "\b\b" << std::endl; const auto time_map_stop = std::chrono::high_resolution_clock::now(); const auto time_map_build = std::chrono::duration_cast<std::chrono::milliseconds>(time_map_stop - time_map_start); MESSAGE_("Time used to build map: " + std::to_string(time_map_build.count()) + " milliseconds."); double read_bw = double(total_file_bytes_read) / (1024.0 * 1024.0 * 1024.0); read_bw = (read_bw / time_map_build.count()) * 1000.f; MESSAGE_("Total bytes read: " + std::to_string(total_file_bytes_read) + " Effective Read B/W: " + std::to_string(read_bw) + " GB/s."); // CK_CUDA_THROW_(cudaFree(culled_index_count)); CK_CUDA_THROW_(cudaFree(accum_location)); // starting to do the convertion if (process_output) { uint32_t *dev_slot_size_array = nullptr; size_t slot_size_array_size = num_categoricals * sizeof(uint32_t); CK_CUDA_THROW_(cudaMalloc(&dev_slot_size_array, slot_size_array_size)); CK_CUDA_THROW_(cudaMemcpy(dev_slot_size_array, host_sz_per_fea.data(), slot_size_array_size, cudaMemcpyHostToDevice)); int32_t *dev_out_buffer = nullptr; int32_t *host_out_buffer = nullptr; size_t sz_output_buffer = 128 * 1024 * 1024; // 128 MB, = read_chunks CK_CUDA_THROW_(cudaMalloc(&dev_out_buffer, sz_output_buffer)); CK_CUDA_THROW_(cudaMallocHost(&host_out_buffer, sz_output_buffer)); int64_t *dev_int_col_ptrs = nullptr; int64_t *dev_int_col_nullmask_ptrs = nullptr; int64_t *dev_cat_col_nullmask_ptrs = nullptr; int64_t *dev_categorical_col_hash_obj = nullptr; int64_t *dev_char_ptrs = nullptr; int64_t *dev_offset_ptrs = nullptr; size_t sz_dev_int_col = num_numericals * sizeof(int64_t); size_t sz_dev_cat_hash_obj = num_categoricals * sizeof(map_type<key_type, value_type>); size_t sz_dev_str_ptrs = num_categoricals * sizeof(int64_t); CK_CUDA_THROW_(cudaMalloc(&dev_int_col_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(cudaMalloc(&dev_int_col_nullmask_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(cudaMalloc(&dev_cat_col_nullmask_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(cudaMalloc(&dev_categorical_col_hash_obj, sz_dev_cat_hash_obj)); CK_CUDA_THROW_(cudaMalloc(&dev_char_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(cudaMalloc(&dev_offset_ptrs, sz_dev_str_ptrs)); // encode and write out binary int maxbytes = 96 * 1024; // dynamic shared memory size 96 KB cudaFuncSetAttribute(process_data_rows<key_type, value_type>, cudaFuncAttributeMaxDynamicSharedMemorySize, maxbytes); std::vector<map_type<key_type, value_type>> categorical_col_hash_obj; for (auto c : cat_column_names) { categorical_col_hash_obj.push_back(*categorical_col_hash_tables[c]); } CK_CUDA_THROW_(cudaMemcpy((void*)dev_categorical_col_hash_obj, (void*)categorical_col_hash_obj.data(), sz_dev_cat_hash_obj, cudaMemcpyHostToDevice)); if (process_output) { std::ofstream *binary_writer = nullptr; if (write_out) binary_writer = new std::ofstream(std::string(output_dir_path + "/train_data.bin"), std::ios::binary); size_t sz_total_output_binary = 0; const auto time_convert_start = 
std::chrono::high_resolution_clock::now(); // train_data.bin { int32_t rows_begin_train = 0, rows_end_train = 36672493; // train.txt [:36672493) std::string input_file_path = std::string(input_dir_path + "/train.txt"); sz_total_output_binary = convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer, dev_slot_size_array, rows_begin_train, rows_end_train, 3); MESSAGE_("Porcessed file: " + input_file_path + " for /train_data.bin"); MESSAGE_("Size of train_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer) binary_writer->close(); } // validation-data and testing-data { int32_t rows_begin_val = 36672493, rows_end_val = 41256555; // train.txt [36672493, 41256555) int32_t rows_begin_test = 41256555, rows_end_test = 45840617; // train.txt [41256555, 45840617] std::string input_file_path = std::string(input_dir_path + "/train.txt"); // val std::ofstream *binary_writer_val = nullptr; if (write_out) binary_writer_val = new std::ofstream(std::string(output_dir_path + "/val_data.bin"), std::ios::binary); sz_total_output_binary = convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer_val, dev_slot_size_array, rows_begin_val, rows_end_val, 3); MESSAGE_("Size of val_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer_val) binary_writer_val->close(); // test std::ofstream *binary_writer_test = nullptr; if (write_out) binary_writer_test = new std::ofstream(std::string(output_dir_path + "/test_data.bin"), std::ios::binary); sz_total_output_binary = convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer_test, dev_slot_size_array, rows_begin_test, rows_end_test, 3); MESSAGE_("Size of test_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer_test) binary_writer_test->close(); MESSAGE_("Processed file: " + input_file_path + " for val_data.bin and test_data.bin"); } const auto time_convert_stop = std::chrono::high_resolution_clock::now(); const auto time_convert_total = std::chrono::duration_cast<std::chrono::milliseconds>(time_convert_stop - time_convert_start); MESSAGE_("Time to process binaries: " + std::to_string(time_convert_total.count()) + " milliseconds."); double p_read_bw = (double)process_read_bytes / (1024.0 * 1024.0 * 1024.0); p_read_bw = (p_read_bw / time_convert_total.count()) * 1000.f; double p_write_bw = (double)process_write_bytes / (1024.0 * 1024.0 * 1024.0); p_write_bw = (p_write_bw / time_convert_total.count()) * 1000.f; size_t total_second_pass_bytes = process_read_bytes + process_write_bytes; double p_2nd_bw = (double)total_second_pass_bytes / (1024.0 * 1024.0 * 1024.0); p_2nd_bw = (p_2nd_bw / time_convert_total.count()) * 1000.f; MESSAGE_("Convert Bytes reading: " + std::to_string(process_read_bytes) + ", 
Effective reading B/W: " + std::to_string(p_read_bw) + " GB/s."); MESSAGE_("Convert Bytes writing: " + std::to_string(process_write_bytes) + ", Effective reading B/W: " + std::to_string(p_write_bw) + " GB/s."); MESSAGE_("Convert Bytes total: " + std::to_string(total_second_pass_bytes) + ", Effective reading B/W: " + std::to_string(p_2nd_bw) + " GB/s."); } const auto program_end_time = std::chrono::high_resolution_clock::now(); const auto application_time = std::chrono::duration_cast<std::chrono::milliseconds>(program_end_time - time_map_start); double app_bw = (double)total_file_bytes_read / (1024.0 * 1024.0 * 1024.0); app_bw = (app_bw / application_time.count()) * 1000.f; MESSAGE_("Application process B/W: " + std::to_string(app_bw) + " GB/s."); CK_CUDA_THROW_(cudaFree(dev_out_buffer)); CK_CUDA_THROW_(cudaFreeHost(host_out_buffer)); CK_CUDA_THROW_(cudaFree(dev_int_col_ptrs)); CK_CUDA_THROW_(cudaFree(dev_int_col_nullmask_ptrs)); CK_CUDA_THROW_(cudaFree(dev_categorical_col_hash_obj)); CK_CUDA_THROW_(cudaFree(dev_char_ptrs)); CK_CUDA_THROW_(cudaFree(dev_offset_ptrs)); CK_CUDA_THROW_(cudaFree(dev_slot_size_array)); CK_CUDA_THROW_(cudaFree(dev_cat_col_nullmask_ptrs)); } // destory map objects for (auto c : categorical_col_hash_tables) c.second->destroy(); delete p_mr; p_mr = nullptr; } void process_terabyte_dataset(const std::string& input_dir_path, const std::string& output_dir_path, const int num_numericals, const int num_categoricals, const std::vector<std::string>& train_days, const std::vector<std::string>& test_days) { int max_chunk_per_file = 10000; // loop count, in a signle binary data, store how many chunks bool process_output = true; bool write_out = true; int min_cat_fea_cardi = 40000000; // 40M int32_t hash_bucket = min_cat_fea_cardi; std::vector<int32_t> hist_sizes = {hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket, hash_bucket}; // mod-idx size_t pool_alloc_size = (size_t)10 * 1024 * 1024 * 1024; // 10 GB rmm::mr::device_memory_resource *base_mr = new rmm::mr::cuda_memory_resource(); auto *p_mr = new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(base_mr, pool_alloc_size); rmm::mr::set_current_device_resource(p_mr); std::vector<std::string> column_dtypes; // dtypes of label, dense, categorical std::vector<std::string> column_names; // names of label, dense, categorical std::vector<std::string> cat_column_names; // names of categorical std::map<std::string, int32_t> column_name_to_col_idx; // <col-name, idx> std::unordered_map<std::string, map_type<key_type, value_type>*> categorical_col_hash_tables; // <name, <key, value>> // label column_dtypes.push_back("int32"); column_names.push_back("label"); column_name_to_col_idx.insert(std::make_pair("label", 0)); // dense-features for (int k = 1; k <= 13; k++) { column_dtypes.push_back("int32"); std::string name = "I" + std::to_string(k); column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, k)); } // categorical-features for (int k = 1; k <= num_categoricals; k++) { column_dtypes.push_back("str"); std::string name = "C" + std::to_string(k); column_names.push_back(name); cat_column_names.push_back(name); column_name_to_col_idx.insert(std::make_pair(name, k+num_numericals-1)); auto cuda_map_obj = map_type<key_type, 
value_type>::create(compute_hash_table_size(hist_sizes[k-1])).release();; categorical_col_hash_tables.insert(std::make_pair(name, cuda_map_obj)); } int current_device = 0; cudaDeviceProp prop; CK_CUDA_THROW_(cudaGetDeviceProperties(&prop, current_device)); size_t read_chunks = 128 * 1024 * 1024; // read 128MB at one time uint32_t *accum_location = nullptr; // slot-size CK_CUDA_THROW_(cudaMalloc(&accum_location, 128)); // 128 Bytes = 32 * uint32_t CK_CUDA_THROW_(cudaMemset(accum_location, 0, 128)); // uint32_t *culled_index_count = nullptr; // CK_CUDA_THROW_(cudaMalloc(&culled_index_count, 128)); // 128 Bytes = 32 * uint32_t size_t total_file_bytes_read = 0; const auto time_map_start = std::chrono::high_resolution_clock::now(); // iteration on each day's data, including training and testing. std::vector<std::string> all_days; all_days.insert(all_days.end(), train_days.begin(), train_days.end()); all_days.insert(all_days.end(), test_days.begin(), test_days.end()); std::vector<size_t> sample_nums; for (const auto& day : all_days) { // get file size std::string input_file_name = input_dir_path + "/day_" + day; std::ifstream binary_reader(input_file_name, std::ios::binary); binary_reader.seekg(0, std::ios::end); size_t file_size = binary_reader.tellg(); binary_reader.close(); // csv arguments, https://docs.rapids.ai/api/libcudf/stable/structcudf_1_1io_1_1read__csv__args.html cudf_io::csv_reader_options in_args = cudf_io::csv_reader_options::builder( cudf_io::source_info{input_file_name}).header(-1); in_args.set_dtypes(column_dtypes); in_args.set_names(column_names); in_args.set_delimiter('\t'); in_args.set_byte_range_size(read_chunks); // how many bytes to read at one time. in_args.set_skipfooter(0); in_args.set_skiprows(0); in_args.set_use_cols_names(cat_column_names); int32_t total_row_nums = 0; int loop_count = 0; while (true) { total_file_bytes_read += in_args.get_byte_range_size(); cudf_io::table_with_metadata tbl_w_metadata = cudf_io::read_csv(in_args, p_mr); total_row_nums += tbl_w_metadata.tbl->num_rows(); dim3 block(prop.maxThreadsPerBlock, 1, 1); dim3 grid((tbl_w_metadata.tbl->num_rows() - 1) / block.x + 1, 1, 1); // categorical-features for (unsigned int k = 0; k < cat_column_names.size(); ++k) { auto col = std::move(tbl_w_metadata.tbl->get_column(k)); if (col.type().id() == cudf::type_id::STRING) { auto str_col = cudf::strings_column_view(col.view()); int64_t num_strings = str_col.size(); char *char_array = const_cast<char*>(str_col.chars().data<char>()); int32_t *offsets = const_cast<int32_t*>(str_col.offsets().data<int32_t>()); build_categorical_index<key_type, value_type><<<grid, block>>>(char_array, offsets, num_strings, // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else if (col.type().id() == cudf::type_id::INT32) { key_type *data = const_cast<key_type*>(col.view().data<key_type>()); bitmask_type *in_mask = const_cast<bitmask_type*>(col.view().null_mask()); build_categorical_index_from_ints<key_type, value_type><<<grid, block>>>(data, in_mask, tbl_w_metadata.tbl->num_rows(), // *categorical_col_hash_tables[cat_column_names[k]], hash_bucket, &accum_location[k]); *categorical_col_hash_tables[cat_column_names[k]], hist_sizes[k], &accum_location[k]); } else { ERROR_MESSAGE_("col.type().id() != [STRING, INT32]"); } } size_t new_byte_range_offset = in_args.get_byte_range_offset() + read_chunks; in_args.set_byte_range_offset(new_byte_range_offset); if 
(in_args.get_byte_range_offset() >= file_size) break; if ((in_args.get_byte_range_offset() + read_chunks) > file_size) { size_t new_byte_range_size = file_size - in_args.get_byte_range_offset(); in_args.set_byte_range_size(new_byte_range_size); } ++loop_count; if (loop_count == max_chunk_per_file) break; } MESSAGE_(input_file_name + "'s total rows number = " + std::to_string(total_row_nums)); sample_nums.push_back(total_row_nums); } // end for all_days // show: slot size array std::vector<uint32_t> host_sz_per_fea(num_categoricals); CK_CUDA_THROW_(cudaMemcpy(host_sz_per_fea.data(), accum_location, num_categoricals * sizeof(uint32_t), cudaMemcpyDeviceToHost)); MESSAGE_("Slot size array, missing value mapped to unused key: "); for (auto c : host_sz_per_fea) std::cout << (c) << ", "; std::cout << "\b\b" << std::endl; const auto time_map_stop = std::chrono::high_resolution_clock::now(); const auto time_map_build = std::chrono::duration_cast<std::chrono::milliseconds>(time_map_stop - time_map_start); MESSAGE_("Time used to build map: " + std::to_string(time_map_build.count()) + " milliseconds."); double read_bw = double(total_file_bytes_read) / (1024.0 * 1024.0 * 1024.0); read_bw = (read_bw / time_map_build.count()) * 1000.f; MESSAGE_("Total bytes read: " + std::to_string(total_file_bytes_read) + " Effective Read B/W: " + std::to_string(read_bw) + " GB/s."); // CK_CUDA_THROW_(cudaFree(culled_index_count)); CK_CUDA_THROW_(cudaFree(accum_location)); // starting to do the convertion if (process_output) { uint32_t *dev_slot_size_array = nullptr; size_t slot_size_array_size = num_categoricals * sizeof(uint32_t); CK_CUDA_THROW_(cudaMalloc(&dev_slot_size_array, slot_size_array_size)); CK_CUDA_THROW_(cudaMemcpy(dev_slot_size_array, host_sz_per_fea.data(), slot_size_array_size, cudaMemcpyHostToDevice)); int32_t *dev_out_buffer = nullptr; int32_t *host_out_buffer = nullptr; size_t sz_output_buffer = 128 * 1024 * 1024; // 128 MB, = read_chunks CK_CUDA_THROW_(cudaMalloc(&dev_out_buffer, sz_output_buffer)); CK_CUDA_THROW_(cudaMallocHost(&host_out_buffer, sz_output_buffer)); int64_t *dev_int_col_ptrs = nullptr; int64_t *dev_int_col_nullmask_ptrs = nullptr; int64_t *dev_cat_col_nullmask_ptrs = nullptr; int64_t *dev_categorical_col_hash_obj = nullptr; int64_t *dev_char_ptrs = nullptr; int64_t *dev_offset_ptrs = nullptr; size_t sz_dev_int_col = num_numericals * sizeof(int64_t); size_t sz_dev_cat_hash_obj = num_categoricals * sizeof(map_type<key_type, value_type>); size_t sz_dev_str_ptrs = num_categoricals * sizeof(int64_t); CK_CUDA_THROW_(cudaMalloc(&dev_int_col_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(cudaMalloc(&dev_int_col_nullmask_ptrs, sz_dev_int_col)); CK_CUDA_THROW_(cudaMalloc(&dev_cat_col_nullmask_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(cudaMalloc(&dev_categorical_col_hash_obj, sz_dev_cat_hash_obj)); CK_CUDA_THROW_(cudaMalloc(&dev_char_ptrs, sz_dev_str_ptrs)); CK_CUDA_THROW_(cudaMalloc(&dev_offset_ptrs, sz_dev_str_ptrs)); // encode and write out binary int maxbytes = 96 * 1024; // dynamic shared memory size 96 KB cudaFuncSetAttribute(process_data_rows<key_type, value_type>, cudaFuncAttributeMaxDynamicSharedMemorySize, maxbytes); std::vector<map_type<key_type, value_type>> categorical_col_hash_obj; for (auto c : cat_column_names) { categorical_col_hash_obj.push_back(*categorical_col_hash_tables[c]); } CK_CUDA_THROW_(cudaMemcpy((void*)dev_categorical_col_hash_obj, (void*)categorical_col_hash_obj.data(), sz_dev_cat_hash_obj, cudaMemcpyHostToDevice)); if (process_output) { const auto time_convert_start = 
std::chrono::high_resolution_clock::now(); std::ofstream *binary_writer = nullptr; if (write_out) binary_writer = new std::ofstream(std::string(output_dir_path + "/train_data.bin"), std::ios::binary); size_t sz_total_output_binary = 0; // train_data.bin size_t saved_samples_num = 0; for (size_t i = 0; i < train_days.size(); i++) { const auto& day = train_days[i]; size_t needed_samples_num = 4195197692 - saved_samples_num; // total should be 4195197692 int32_t rows_begin_train = -1, rows_end_train = -1; // train.txt [:36672000) if (needed_samples_num < sample_nums[i]) rows_end_train = needed_samples_num; std::string input_file_path = input_dir_path + "/day_" + day; sz_total_output_binary += convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer, dev_slot_size_array, rows_begin_train, rows_end_train, 1); MESSAGE_("Porcessed file: " + input_file_path + " for /train_data.bin"); if (needed_samples_num < sample_nums[i]) { saved_samples_num += needed_samples_num; break; } else { saved_samples_num += sample_nums[i]; } } // end for train_days MESSAGE_("Size of train_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer) binary_writer->close(); // testing-data { // test_data.bin std::ofstream *binary_writer_test = nullptr; if (write_out) binary_writer_test = new std::ofstream(std::string(output_dir_path + "/test_data.bin"), std::ios::binary); sz_total_output_binary = 0; size_t saved_samples_num = 0; for (size_t i = 0; i < test_days.size(); ++i) { const auto& day = test_days[i]; size_t needed_samples_num = 89137319 - saved_samples_num; // total should be 89137319 int32_t rows_begin_test = -1, rows_end_test = -1; if (needed_samples_num < sample_nums[train_days.size() + i]) rows_end_test = needed_samples_num; // rows_begin_test = 89137318; rows_end_test = -1; // [89137318: ), second half std::string input_file_path = input_dir_path + "/day_" + day; sz_total_output_binary += convert_input_binaries<key_type, value_type>(p_mr, input_file_path, column_dtypes, column_names, hash_bucket, max_chunk_per_file, 0, false, dev_int_col_ptrs, dev_int_col_nullmask_ptrs, dev_cat_col_nullmask_ptrs, dev_categorical_col_hash_obj, dev_char_ptrs, dev_offset_ptrs, dev_out_buffer, host_out_buffer, binary_writer_test, dev_slot_size_array, rows_begin_test, rows_end_test, 1); MESSAGE_("Porcessed file: " + input_file_path + " for /test_data.bin"); if (needed_samples_num < sample_nums[train_days.size() + i]) { saved_samples_num += needed_samples_num; break; } else { saved_samples_num += sample_nums[train_days.size() + i]; } } // end for test_days MESSAGE_("Size of test_data.bin: " + std::to_string(sz_total_output_binary) + " Bytes."); if (binary_writer_test) binary_writer_test->close(); } const auto time_convert_stop = std::chrono::high_resolution_clock::now(); const auto time_convert_total = std::chrono::duration_cast<std::chrono::milliseconds>(time_convert_stop - time_convert_start); MESSAGE_("Time to process binaries: " + std::to_string(time_convert_total.count()) + " milliseconds."); double p_read_bw = (double)process_read_bytes / (1024.0 * 1024.0 * 1024.0); p_read_bw = (p_read_bw / time_convert_total.count()) * 1000.f; double p_write_bw = (double)process_write_bytes / (1024.0 * 1024.0 * 1024.0); p_write_bw = (p_write_bw / 
time_convert_total.count()) * 1000.f; size_t total_second_pass_bytes = process_read_bytes + process_write_bytes; double p_2nd_bw = (double)total_second_pass_bytes / (1024.0 * 1024.0 * 1024.0); p_2nd_bw = (p_2nd_bw / time_convert_total.count()) * 1000.f; MESSAGE_("Convert Bytes reading: " + std::to_string(process_read_bytes) + ", Effective reading B/W: " + std::to_string(p_read_bw) + " GB/s."); MESSAGE_("Convert Bytes writing: " + std::to_string(process_write_bytes) + ", Effective reading B/W: " + std::to_string(p_write_bw) + " GB/s."); MESSAGE_("Convert Bytes total: " + std::to_string(total_second_pass_bytes) + ", Effective reading B/W: " + std::to_string(p_2nd_bw) + " GB/s."); } const auto program_end_time = std::chrono::high_resolution_clock::now(); const auto application_time = std::chrono::duration_cast<std::chrono::milliseconds>(program_end_time - time_map_start); double app_bw = (double)total_file_bytes_read / (1024.0 * 1024.0 * 1024.0); app_bw = (app_bw / application_time.count()) * 1000.f; MESSAGE_("Application process B/W: " + std::to_string(app_bw) + " GB/s."); CK_CUDA_THROW_(cudaFree(dev_out_buffer)); CK_CUDA_THROW_(cudaFreeHost(host_out_buffer)); CK_CUDA_THROW_(cudaFree(dev_int_col_ptrs)); CK_CUDA_THROW_(cudaFree(dev_int_col_nullmask_ptrs)); CK_CUDA_THROW_(cudaFree(dev_categorical_col_hash_obj)); CK_CUDA_THROW_(cudaFree(dev_char_ptrs)); CK_CUDA_THROW_(cudaFree(dev_offset_ptrs)); CK_CUDA_THROW_(cudaFree(dev_slot_size_array)); CK_CUDA_THROW_(cudaFree(dev_cat_col_nullmask_ptrs)); } // destory map objects for (auto c : categorical_col_hash_tables) c.second->destroy(); delete p_mr; p_mr = nullptr; } int main(const int argc, const char *argv[]) { if (argc < 3) { MESSAGE_("Need min 2 args: input_dir output_dir"); MESSAGE_("Usage for Kaggle Datasets: ./dlrm_raw input_dir output_dir"); MESSAGE_("Usage for TeraBytes Datasets: ./dlrm_raw input_dir output_dir --train [days for training] --test [days for testing]" ", those days are seperated with comma, no whitespace."); return -1; } const int num_numericals = 14; // label + 13 int-dense-feature const int num_categoricals = 26; // 26 int-categorical-feature std::string input_dir_path(argv[1]); std::string output_dir_path(argv[2]); switch (argc) { case 3: { MESSAGE_("Processing Kaggle datasets"); MESSAGE_("input_dir: " + input_dir_path); MESSAGE_("output_dir: " + output_dir_path); process_kaggle_dataset(input_dir_path, output_dir_path, num_numericals, num_categoricals); break; } case 7: { if (argc == 7 && (std::strcmp(argv[3], "--train") != 0 || std::strcmp(argv[5], "--test") != 0)) { MESSAGE_("Usage for TeraBytes Datasets: ./dlrm_raw input_dir output_dir --train [days for training] --test [days for testing]" ", those days are seperated with comma, no whitespace."); MESSAGE_("For example: ./dlrm_raw ./ ./ --train 0,1,2,3,4 --test 5,6,7"); return -1; } const std::vector<std::string> train_days = split_string(std::string(argv[4]), ","); const std::vector<std::string> test_days = split_string(std::string(argv[6]), ","); MESSAGE_("Processing TeraBytes datasets."); MESSAGE_("input_dir: " + input_dir_path); MESSAGE_("output_dir: " + output_dir_path); MESSAGE_("days for training: " + std::string(argv[4])); MESSAGE_("days for testing: " + std::string(argv[6])); process_terabyte_dataset(input_dir_path, output_dir_path, num_numericals, num_categoricals, train_days, test_days); break; } default: { MESSAGE_("Usage for Kaggle Datasets: ./dlrm_raw input_dir output_dir"); MESSAGE_("Usage for TeraBytes Datasets: ./dlrm_raw input_dir output_dir --train [days 
for training] --test [days for testing]" ", those days are separated with comma, no whitespace."); return -1; break; } } MESSAGE_("Done."); return 0; }
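// Minimal sketch (hedged, not the project's actual code): main() above calls a
// split_string() helper that is defined elsewhere and not shown in this excerpt.
// Judging only from the two call sites -- turning the comma-separated
// --train/--test day lists such as "0,1,2,3,4" into a std::vector<std::string> --
// a stand-in could look like the following. The name split_string_sketch and the
// implementation details are assumptions made for illustration.
#include <string>
#include <vector>

std::vector<std::string> split_string_sketch(const std::string &text,
                                             const std::string &delim) {
  std::vector<std::string> parts;
  size_t begin = 0;
  while (begin <= text.size()) {
    const size_t end = text.find(delim, begin);
    if (end == std::string::npos) {          // last (or only) token
      parts.push_back(text.substr(begin));
      break;
    }
    parts.push_back(text.substr(begin, end - begin));
    begin = end + delim.size();
  }
  return parts;                              // "5,6,7" -> {"5", "6", "7"}
}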
4e5d880ea4aa7c8a4c35aac9a1c76a2b56514152.hip
// !!! This is a file automatically generated by hipify!!! /* * Example of how to use the mxGPUArray API in a MEX file. This example shows * how to write a MEX function that takes a gpuArray input and returns a * gpuArray output, e.g. B=mexFunction(A). * * Copyright 2012 The MathWorks, Inc. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <stdint.h> #include "mex.h" #include "gpu/mxGPUArray.h" #include <cstdlib> #include <algorithm> #include <iostream> using namespace std; const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, NchanMax = 128, block = 32, NrankMax = 3; ////////////////////////////////////////////////////////////////////////////////////////// __global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){ volatile __shared__ float sW[nt0*NrankMax], sdata[(Nthreads+nt0)*NrankMax]; float x; int tid, tid0, bid, i, nid, Nrank, NT, Nfilt; tid = threadIdx.x; bid = blockIdx.x; Nfilt = (int) Params[1]; NT = (int) Params[0]; Nrank = (int) Params[6]; if(tid<nt0*((int) Params[6])) sW[tid]= W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0]; __syncthreads(); tid0 = 0; while (tid0<NT-Nthreads-nt0+1){ if (tid<nt0*NrankMax) sdata[tid%nt0 + (tid/nt0)*(Nthreads+nt0)] = data[tid0 + tid%nt0+ NT*(bid + Nfilt*(tid/nt0))]; #pragma unroll 3 for(nid=0;nid<Nrank;nid++){ sdata[tid + nt0+nid*(Nthreads+nt0)] = data[nt0+tid0 + tid+ NT*(bid +nid*Nfilt)]; } __syncthreads(); x = 0.0f; for(nid=0;nid<Nrank;nid++){ #pragma unroll 4 for(i=0;i<nt0;i++) x += sW[i + nid*nt0] * sdata[i+tid + nid*(Nthreads+nt0)]; } conv_sig[tid0 + tid + NT*bid] = x; tid0+=Nthreads; __syncthreads(); } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const float *data, const float *mu, const float *lam, float *xbest, float *err, int *ftype){ int tid, tid0, i, bid, NT, Nfilt, ibest = 0; float Th, Cf, Ci, xb, Cbest = 0.0f; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nfilt = (int) Params[1]; Th = (float) Params[2]; tid0 = tid + bid * Nthreads; if (tid0<NT){ for (i=0; i<Nfilt;i++){ Ci = data[tid0 + NT * i] + mu[i] * lam[i]; Cf = Ci * Ci / (lam[i] + 1.0f) - lam[i]*mu[i]*mu[i]; if (Cf > Cbest){ Cbest = Cf; xb = Ci - lam[i] * mu[i]; // /(lam[i] + 1); ibest = i; } } if (Cbest > Th*Th){ err[tid0] = Cbest; xbest[tid0] = xb; ftype[tid0] = ibest; } } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void cleanup_spikes(const double *Params, const float *xbest, const float *err, const int *ftype, int *st, int *id, float *x, float *C, int *counter){ int indx, maxFR, NTOT, tid, bid, NT, tid0, j; volatile __shared__ float sdata[Nthreads+2*lockout+1]; bool flag=0; float err0; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; maxFR = (int) Params[3]; tid0 = bid * Nthreads; if(tid0<NT-Nthreads-lockout+1){ if (tid<2*lockout) sdata[tid] = err[tid0 + tid]; sdata[tid+2*lockout] = err[2*lockout + tid0 + tid]; __syncthreads(); err0 = sdata[tid+lockout]; if(err0>1e-10){ flag = 0; for(j=-lockout;j<=lockout;j++) if(sdata[tid+lockout+j]>err0){ flag = 1; break; } if(flag==0){ indx = atomicAdd(&counter[0], 1); if (indx<maxFR){ st[indx] = tid+lockout + tid0; id[indx] = ftype[tid+lockout + tid0]; x[indx] = xbest[tid+lockout + tid0]; C[indx] = err0; } } } } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void subSpikes(const double *Params, const int *st, const int *id, 
const float *x, const int *counter, float *dout, const float *WtW){ int tid, bid, NT, ind, tcurr, Nfilt; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nfilt = (int) Params[1]; for(ind=counter[1]; ind<counter[0];ind++){ tcurr = tid + st[ind]-nt0+1; if (tcurr>=0 & tcurr<NT) dout[tcurr + bid*NT] -= x[ind] * WtW[tid + id[ind]*(2*nt0-1) + (2*nt0-1)*Nfilt*bid]; } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void subtract_spikes(const double *Params, const int *st, const int *id, const float *x, const int *counter, float *dataraw, const float *W, const float *U){ int tid, bid, Nblocks, i, NT, ind, Nchan; __shared__ float sh_W[nt0], sh_U[NchanMax]; tid = threadIdx.x; bid = blockIdx.x; Nblocks = gridDim.x; NT = (int) Params[0]; Nchan = (int) Params[5]; ind = bid; while(ind<counter[0]){ if (tid<nt0) sh_W[tid] = W[tid + nt0*id[ind]]; sh_U[tid] = U[tid + Nchan*id[ind]]; __syncthreads(); for (i=0;i<nt0;i++) dataraw[i + st[ind] + NT * tid] -= x[ind] * sh_W[i] * sh_U[tid]; ind+= Nblocks; __syncthreads(); } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void getWgradient(const double *Params, const int *st, const int *id, const float *x, const int *counter, const float *datarez, const float *U, float *dW){ int tid, bid, i, ind, NT, Nchan; float xprod; volatile __shared__ float sh_U[NchanMax]; NT = (int) Params[0]; Nchan = (int) Params[5]; tid = threadIdx.x; bid = blockIdx.x; while(tid<Nchan){ sh_U[tid] = U[tid + bid*Nchan]; tid+= blockDim.x; } tid = threadIdx.x; __syncthreads(); for(ind=0; ind<counter[0];ind++) if (id[ind]==bid){ xprod = 0.0f; for (i=0;i<Nchan;i++) xprod+= sh_U[i] * datarez[st[ind] + tid + NT * i]; dW[tid + nt0 * bid] += xprod * x[ind]; } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void getUgradient(const double *Params, const int *st, const int *id, const float *x, const int *counter, const float *datarez, const float *W, float *dU){ int j, tid, bid, i, ind, NT, Nchan; float xprod; volatile __shared__ float sh_M[NchanMax*nt0], sh_W[nt0]; NT = (int) Params[0]; Nchan = (int) Params[5]; tid = threadIdx.x; bid = blockIdx.x; if (tid<nt0) sh_W[tid] = W[tid + nt0*bid]; __syncthreads(); for(ind=0; ind<counter[0];ind++) if (id[ind]==bid){ if(tid<nt0) for (j=0;j<Nchan;j++) sh_M[tid + nt0*j] = datarez[tid + st[ind] + NT*j]; __syncthreads(); xprod = 0.0f; for (i=0;i<nt0;i++) xprod+= sh_W[i] * sh_M[i + tid*nt0]; dU[tid + bid*Nchan] += xprod * x[ind]; __syncthreads(); } } ////////////////////////////////////////////////////////////////////////////////////////// /* * Host code */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { /* Declare input variables*/ double *Params, *d_Params; int blocksPerGrid, NT, maxFR, Nchan; int const threadsPerBlock = Nthreads; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); /* read Params and copy to GPU */ Params = (double*) mxGetData(prhs[0]); NT = (int) Params[0]; blocksPerGrid = (int) Params[1]; maxFR = (int) Params[3]; Nchan = (int) Params[5]; hipMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0])); hipMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),hipMemcpyHostToDevice); /* collect input GPU variables*/ mxGPUArray const *W, *data, *WtW, *mu, *lam; const float *d_W, *d_data, *d_WtW, *d_mu, *d_lam; data = mxGPUCreateFromMxArray(prhs[1]); d_data = (float const *)(mxGPUGetDataReadOnly(data)); W = mxGPUCreateFromMxArray(prhs[2]); d_W = (float const *)(mxGPUGetDataReadOnly(W)); WtW = mxGPUCreateFromMxArray(prhs[3]); d_WtW = (float const *)(mxGPUGetDataReadOnly(WtW)); mu = mxGPUCreateFromMxArray(prhs[4]); d_mu = (float const *)(mxGPUGetDataReadOnly(mu)); lam = mxGPUCreateFromMxArray(prhs[5]); d_lam = (float const *)(mxGPUGetDataReadOnly(lam)); /* allocate new GPU variables*/ float *d_err,*d_C, *d_xbest, *d_x, *d_dout; int *d_st, *d_ftype, *d_id, *d_counter; hipMalloc(&d_dout, NT * blocksPerGrid* sizeof(float)); hipMalloc(&d_err, NT * sizeof(float)); hipMalloc(&d_xbest, NT * sizeof(float)); hipMalloc(&d_ftype, NT * sizeof(int)); hipMalloc(&d_st, maxFR * sizeof(int)); hipMalloc(&d_id, maxFR * sizeof(int)); hipMalloc(&d_x, maxFR * sizeof(float)); hipMalloc(&d_C, maxFR * sizeof(float)); hipMalloc(&d_counter, 2*sizeof(int)); hipMemset(d_dout, 0, NT * blocksPerGrid * sizeof(float)); hipMemset(d_counter, 0, 2*sizeof(int)); hipMemset(d_st, 0, maxFR * sizeof(int)); hipMemset(d_id, 0, maxFR * sizeof(int)); hipMemset(d_x, 0, maxFR * sizeof(float)); hipMemset(d_C, 0, maxFR * sizeof(float)); int *counter; counter = (int*) calloc(1,sizeof(int)); hipLaunchKernelGGL(( Conv1D), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_Params, d_data, d_W, d_dout); for(int k=0;k<(int) Params[4];k++){ hipMemset(d_err, 0, NT * sizeof(float)); hipMemset(d_ftype, 0, NT * sizeof(int)); hipMemset(d_xbest, 0, NT * sizeof(float)); hipLaunchKernelGGL(( bestFilter), dim3(NT/Nthreads),dim3(threadsPerBlock), 0, 0, d_Params, d_dout, d_mu, d_lam, d_xbest, d_err, d_ftype); hipLaunchKernelGGL(( cleanup_spikes), dim3(NT/Nthreads),dim3(threadsPerBlock), 0, 0, d_Params, d_xbest, d_err, d_ftype, d_st, d_id, d_x, d_C, d_counter); hipMemcpy(counter, d_counter, sizeof(int), hipMemcpyDeviceToHost); if (counter[0]>maxFR){ counter[0] = maxFR; hipMemcpy(d_counter, counter, sizeof(int), hipMemcpyHostToDevice); } hipLaunchKernelGGL(( subSpikes), dim3(blocksPerGrid), dim3(2*nt0-1), 0, 0, d_Params, d_st, d_id, d_x, d_counter, d_dout, d_WtW); hipMemcpy(d_counter+1, d_counter, sizeof(int), hipMemcpyDeviceToHost); if(counter[0]==maxFR) break; } float *x, *C; int *st, *id; int minSize; if (counter[0]<maxFR) minSize = counter[0]; else minSize = maxFR; const mwSize dimst[] = {minSize,1}; plhs[0] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL); st = (int*) mxGetData(plhs[0]); plhs[1] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL); id = (int*) mxGetData(plhs[1]); plhs[2] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL); x = (float*) mxGetData(plhs[2]); plhs[3] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL); C = (float*) mxGetData(plhs[3]); hipMemcpy(st, d_st, minSize * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(id, d_id, minSize * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(x, d_x, minSize * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(C, d_C, minSize * sizeof(float), hipMemcpyDeviceToHost); hipFree(d_ftype); hipFree(d_err); 
hipFree(d_xbest); hipFree(d_st); hipFree(d_id); hipFree(d_x); hipFree(d_C); hipFree(d_counter); hipFree(d_Params); hipFree(d_dout); mxGPUDestroyGPUArray(data); mxGPUDestroyGPUArray(WtW); mxGPUDestroyGPUArray(W); mxGPUDestroyGPUArray(mu); mxGPUDestroyGPUArray(lam); }
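// Minimal sketch (not taken from either file in this pair): the main mechanical
// rewrite hipify applies to this MEX source is the kernel-launch syntax, e.g.
// Conv1D<<<blocksPerGrid, threadsPerBlock>>>(...) in the .cu below becomes
// hipLaunchKernelGGL(( Conv1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, ...)
// in the .hip above. The toy kernel and launcher here only illustrate that
// mapping; the names are hypothetical.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void scale_kernel(float *x, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= s;
}

void launch_scale(float *d_x, float s, int n) {
  dim3 grid((n + 255) / 256), block(256);
#ifdef __HIPCC__
  // HIP form: kernel, grid, block, dynamic shared bytes, stream, then arguments.
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_x, s, n);
#else
  // Original CUDA triple-chevron form.
  scale_kernel<<<grid, block>>>(d_x, s, n);
#endif
}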
4e5d880ea4aa7c8a4c35aac9a1c76a2b56514152.cu
/* * Example of how to use the mxGPUArray API in a MEX file. This example shows * how to write a MEX function that takes a gpuArray input and returns a * gpuArray output, e.g. B=mexFunction(A). * * Copyright 2012 The MathWorks, Inc. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <stdint.h> #include "mex.h" #include "gpu/mxGPUArray.h" #include <cstdlib> #include <algorithm> #include <iostream> using namespace std; const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, NchanMax = 128, block = 32, NrankMax = 3; ////////////////////////////////////////////////////////////////////////////////////////// __global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){ volatile __shared__ float sW[nt0*NrankMax], sdata[(Nthreads+nt0)*NrankMax]; float x; int tid, tid0, bid, i, nid, Nrank, NT, Nfilt; tid = threadIdx.x; bid = blockIdx.x; Nfilt = (int) Params[1]; NT = (int) Params[0]; Nrank = (int) Params[6]; if(tid<nt0*((int) Params[6])) sW[tid]= W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0]; __syncthreads(); tid0 = 0; while (tid0<NT-Nthreads-nt0+1){ if (tid<nt0*NrankMax) sdata[tid%nt0 + (tid/nt0)*(Nthreads+nt0)] = data[tid0 + tid%nt0+ NT*(bid + Nfilt*(tid/nt0))]; #pragma unroll 3 for(nid=0;nid<Nrank;nid++){ sdata[tid + nt0+nid*(Nthreads+nt0)] = data[nt0+tid0 + tid+ NT*(bid +nid*Nfilt)]; } __syncthreads(); x = 0.0f; for(nid=0;nid<Nrank;nid++){ #pragma unroll 4 for(i=0;i<nt0;i++) x += sW[i + nid*nt0] * sdata[i+tid + nid*(Nthreads+nt0)]; } conv_sig[tid0 + tid + NT*bid] = x; tid0+=Nthreads; __syncthreads(); } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const float *data, const float *mu, const float *lam, float *xbest, float *err, int *ftype){ int tid, tid0, i, bid, NT, Nfilt, ibest = 0; float Th, Cf, Ci, xb, Cbest = 0.0f; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nfilt = (int) Params[1]; Th = (float) Params[2]; tid0 = tid + bid * Nthreads; if (tid0<NT){ for (i=0; i<Nfilt;i++){ Ci = data[tid0 + NT * i] + mu[i] * lam[i]; Cf = Ci * Ci / (lam[i] + 1.0f) - lam[i]*mu[i]*mu[i]; if (Cf > Cbest){ Cbest = Cf; xb = Ci - lam[i] * mu[i]; // /(lam[i] + 1); ibest = i; } } if (Cbest > Th*Th){ err[tid0] = Cbest; xbest[tid0] = xb; ftype[tid0] = ibest; } } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void cleanup_spikes(const double *Params, const float *xbest, const float *err, const int *ftype, int *st, int *id, float *x, float *C, int *counter){ int indx, maxFR, NTOT, tid, bid, NT, tid0, j; volatile __shared__ float sdata[Nthreads+2*lockout+1]; bool flag=0; float err0; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; maxFR = (int) Params[3]; tid0 = bid * Nthreads; if(tid0<NT-Nthreads-lockout+1){ if (tid<2*lockout) sdata[tid] = err[tid0 + tid]; sdata[tid+2*lockout] = err[2*lockout + tid0 + tid]; __syncthreads(); err0 = sdata[tid+lockout]; if(err0>1e-10){ flag = 0; for(j=-lockout;j<=lockout;j++) if(sdata[tid+lockout+j]>err0){ flag = 1; break; } if(flag==0){ indx = atomicAdd(&counter[0], 1); if (indx<maxFR){ st[indx] = tid+lockout + tid0; id[indx] = ftype[tid+lockout + tid0]; x[indx] = xbest[tid+lockout + tid0]; C[indx] = err0; } } } } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void subSpikes(const double *Params, const int *st, const int *id, const float *x, const int *counter, float *dout, const float *WtW){ 
int tid, bid, NT, ind, tcurr, Nfilt; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nfilt = (int) Params[1]; for(ind=counter[1]; ind<counter[0];ind++){ tcurr = tid + st[ind]-nt0+1; if (tcurr>=0 & tcurr<NT) dout[tcurr + bid*NT] -= x[ind] * WtW[tid + id[ind]*(2*nt0-1) + (2*nt0-1)*Nfilt*bid]; } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void subtract_spikes(const double *Params, const int *st, const int *id, const float *x, const int *counter, float *dataraw, const float *W, const float *U){ int tid, bid, Nblocks, i, NT, ind, Nchan; __shared__ float sh_W[nt0], sh_U[NchanMax]; tid = threadIdx.x; bid = blockIdx.x; Nblocks = gridDim.x; NT = (int) Params[0]; Nchan = (int) Params[5]; ind = bid; while(ind<counter[0]){ if (tid<nt0) sh_W[tid] = W[tid + nt0*id[ind]]; sh_U[tid] = U[tid + Nchan*id[ind]]; __syncthreads(); for (i=0;i<nt0;i++) dataraw[i + st[ind] + NT * tid] -= x[ind] * sh_W[i] * sh_U[tid]; ind+= Nblocks; __syncthreads(); } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void getWgradient(const double *Params, const int *st, const int *id, const float *x, const int *counter, const float *datarez, const float *U, float *dW){ int tid, bid, i, ind, NT, Nchan; float xprod; volatile __shared__ float sh_U[NchanMax]; NT = (int) Params[0]; Nchan = (int) Params[5]; tid = threadIdx.x; bid = blockIdx.x; while(tid<Nchan){ sh_U[tid] = U[tid + bid*Nchan]; tid+= blockDim.x; } tid = threadIdx.x; __syncthreads(); for(ind=0; ind<counter[0];ind++) if (id[ind]==bid){ xprod = 0.0f; for (i=0;i<Nchan;i++) xprod+= sh_U[i] * datarez[st[ind] + tid + NT * i]; dW[tid + nt0 * bid] += xprod * x[ind]; } } ////////////////////////////////////////////////////////////////////////////////////////// __global__ void getUgradient(const double *Params, const int *st, const int *id, const float *x, const int *counter, const float *datarez, const float *W, float *dU){ int j, tid, bid, i, ind, NT, Nchan; float xprod; volatile __shared__ float sh_M[NchanMax*nt0], sh_W[nt0]; NT = (int) Params[0]; Nchan = (int) Params[5]; tid = threadIdx.x; bid = blockIdx.x; if (tid<nt0) sh_W[tid] = W[tid + nt0*bid]; __syncthreads(); for(ind=0; ind<counter[0];ind++) if (id[ind]==bid){ if(tid<nt0) for (j=0;j<Nchan;j++) sh_M[tid + nt0*j] = datarez[tid + st[ind] + NT*j]; __syncthreads(); xprod = 0.0f; for (i=0;i<nt0;i++) xprod+= sh_W[i] * sh_M[i + tid*nt0]; dU[tid + bid*Nchan] += xprod * x[ind]; __syncthreads(); } } ////////////////////////////////////////////////////////////////////////////////////////// /* * Host code */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { /* Declare input variables*/ double *Params, *d_Params; int blocksPerGrid, NT, maxFR, Nchan; int const threadsPerBlock = Nthreads; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); /* read Params and copy to GPU */ Params = (double*) mxGetData(prhs[0]); NT = (int) Params[0]; blocksPerGrid = (int) Params[1]; maxFR = (int) Params[3]; Nchan = (int) Params[5]; cudaMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0])); cudaMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),cudaMemcpyHostToDevice); /* collect input GPU variables*/ mxGPUArray const *W, *data, *WtW, *mu, *lam; const float *d_W, *d_data, *d_WtW, *d_mu, *d_lam; data = mxGPUCreateFromMxArray(prhs[1]); d_data = (float const *)(mxGPUGetDataReadOnly(data)); W = mxGPUCreateFromMxArray(prhs[2]); d_W = (float const *)(mxGPUGetDataReadOnly(W)); WtW = mxGPUCreateFromMxArray(prhs[3]); d_WtW = (float const *)(mxGPUGetDataReadOnly(WtW)); mu = mxGPUCreateFromMxArray(prhs[4]); d_mu = (float const *)(mxGPUGetDataReadOnly(mu)); lam = mxGPUCreateFromMxArray(prhs[5]); d_lam = (float const *)(mxGPUGetDataReadOnly(lam)); /* allocate new GPU variables*/ float *d_err,*d_C, *d_xbest, *d_x, *d_dout; int *d_st, *d_ftype, *d_id, *d_counter; cudaMalloc(&d_dout, NT * blocksPerGrid* sizeof(float)); cudaMalloc(&d_err, NT * sizeof(float)); cudaMalloc(&d_xbest, NT * sizeof(float)); cudaMalloc(&d_ftype, NT * sizeof(int)); cudaMalloc(&d_st, maxFR * sizeof(int)); cudaMalloc(&d_id, maxFR * sizeof(int)); cudaMalloc(&d_x, maxFR * sizeof(float)); cudaMalloc(&d_C, maxFR * sizeof(float)); cudaMalloc(&d_counter, 2*sizeof(int)); cudaMemset(d_dout, 0, NT * blocksPerGrid * sizeof(float)); cudaMemset(d_counter, 0, 2*sizeof(int)); cudaMemset(d_st, 0, maxFR * sizeof(int)); cudaMemset(d_id, 0, maxFR * sizeof(int)); cudaMemset(d_x, 0, maxFR * sizeof(float)); cudaMemset(d_C, 0, maxFR * sizeof(float)); int *counter; counter = (int*) calloc(1,sizeof(int)); Conv1D<<<blocksPerGrid,threadsPerBlock>>>(d_Params, d_data, d_W, d_dout); for(int k=0;k<(int) Params[4];k++){ cudaMemset(d_err, 0, NT * sizeof(float)); cudaMemset(d_ftype, 0, NT * sizeof(int)); cudaMemset(d_xbest, 0, NT * sizeof(float)); bestFilter<<<NT/Nthreads,threadsPerBlock>>>( d_Params, d_dout, d_mu, d_lam, d_xbest, d_err, d_ftype); cleanup_spikes<<<NT/Nthreads,threadsPerBlock>>>(d_Params, d_xbest, d_err, d_ftype, d_st, d_id, d_x, d_C, d_counter); cudaMemcpy(counter, d_counter, sizeof(int), cudaMemcpyDeviceToHost); if (counter[0]>maxFR){ counter[0] = maxFR; cudaMemcpy(d_counter, counter, sizeof(int), cudaMemcpyHostToDevice); } subSpikes<<<blocksPerGrid, 2*nt0-1>>>(d_Params, d_st, d_id, d_x, d_counter, d_dout, d_WtW); cudaMemcpy(d_counter+1, d_counter, sizeof(int), cudaMemcpyDeviceToHost); if(counter[0]==maxFR) break; } float *x, *C; int *st, *id; int minSize; if (counter[0]<maxFR) minSize = counter[0]; else minSize = maxFR; const mwSize dimst[] = {minSize,1}; plhs[0] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL); st = (int*) mxGetData(plhs[0]); plhs[1] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL); id = (int*) mxGetData(plhs[1]); plhs[2] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL); x = (float*) mxGetData(plhs[2]); plhs[3] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL); C = (float*) mxGetData(plhs[3]); cudaMemcpy(st, d_st, minSize * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(id, d_id, minSize * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(x, d_x, minSize * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(C, d_C, minSize * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_ftype); cudaFree(d_err); cudaFree(d_xbest); cudaFree(d_st); cudaFree(d_id); cudaFree(d_x); cudaFree(d_C); cudaFree(d_counter); 
cudaFree(d_Params); cudaFree(d_dout); mxGPUDestroyGPUArray(data); mxGPUDestroyGPUArray(WtW); mxGPUDestroyGPUArray(W); mxGPUDestroyGPUArray(mu); mxGPUDestroyGPUArray(lam); }
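// Hedged sketch (identifiers here are illustrative, and the same headers as the
// file above -- mex.h plus the CUDA runtime -- are assumed): in the solve loop
// above, counter[0] is snapshotted into counter[1] with
// cudaMemcpy(d_counter+1, d_counter, ..., cudaMemcpyDeviceToHost), yet both
// pointers refer to device allocations, so the explicit copy kind for that step
// would normally be cudaMemcpyDeviceToDevice. A checked version of the same
// snapshot could look like this.
static void snapshot_counter(int *d_counter) {
  cudaError_t err = cudaMemcpy(d_counter + 1, d_counter, sizeof(int),
                               cudaMemcpyDeviceToDevice);
  if (err != cudaSuccess)
    mexErrMsgIdAndTxt("mex:memcpy", "cudaMemcpy failed: %s",
                      cudaGetErrorString(err));
}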
68eb3fe06bbaff60d7a42fecdfdb968d486af477.hip
// !!! This is a file automatically generated by hipify!!! #include "curand_provider.cuh" #include <stdexcept> #include <sys/time.h> #include <hip/hip_runtime.h> #include "cumath_functions.cuh" using namespace std; CURandProvider *CURandProvider::s_instance = NULL; CURandProvider::CURandProvider() { } CURandProvider::~CURandProvider() { } hiprandState_t* CURandProvider::GetRandomStates(int device, uint32_t requiredLen) { return GetInstance()->p_GetRandomStates(device, requiredLen); } hiprandState_t* CURandProvider::GetRandomStates(int device, dim3 gridDim, dim3 blockDim) { return GetInstance()->p_GetRandomStates(device, gridDim, blockDim); } CURandProvider* CURandProvider::GetInstance() { if (s_instance == NULL) s_instance = new CURandProvider(); return s_instance; } __global__ void cudaInitRandoms(hiprandState_t *state, uint64_t seed, uint32_t maxId) { uint32_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= maxId) return; hiprand_init(seed, id, 0, state + id); } hiprandState_t* CURandProvider::p_GetRandomStates(int device, uint32_t requiredLen) { CURandDeviceInfo &devInfo = m_buffers[device]; if (devInfo.ArrayLen >= requiredLen) return devInfo.States; bool tryPreserve = devInfo.States != NULL; hiprandState_t *newBuff = NULL; hipError_t err = hipMalloc(&newBuff, requiredLen * sizeof(hiprandState_t)); if (tryPreserve && err != hipSuccess) { // Free the old buffer I suppose. This means that a full re-init will need // to be performed hipFree(devInfo.States); devInfo.ArrayLen = 0; devInfo.States = NULL; tryPreserve = false; err = hipMalloc(&newBuff, requiredLen * sizeof(hiprandState_t)); } if (err != hipSuccess) throw runtime_error("Unable to allocate random buffer large enough to satisfy request."); uint32_t offset = 0; if (tryPreserve) { hipMemcpy(newBuff, devInfo.States, devInfo.ArrayLen * sizeof(hiprandState_t), hipMemcpyDeviceToDevice); hipFree(devInfo.States); offset = devInfo.ArrayLen; } devInfo.ArrayLen = requiredLen; devInfo.States = newBuff; // Initialize all of the new random states hipLaunchKernelGGL(( cudaInitRandoms) , dim3(round_up(requiredLen - offset, 128)), dim3(128), 0, 0, devInfo.States + offset, time(NULL), requiredLen - offset); return devInfo.States; } hiprandState_t* CURandProvider::p_GetRandomStates(int device, dim3 gridDim, dim3 blockDim) { uint32_t reqLen = (gridDim.z * blockDim.z) * (gridDim.y * blockDim.y) * (gridDim.x * blockDim.x); return p_GetRandomStates(device, reqLen); }
68eb3fe06bbaff60d7a42fecdfdb968d486af477.cu
#include "curand_provider.cuh" #include <stdexcept> #include <sys/time.h> #include <cuda_runtime.h> #include "cumath_functions.cuh" using namespace std; CURandProvider *CURandProvider::s_instance = NULL; CURandProvider::CURandProvider() { } CURandProvider::~CURandProvider() { } curandState* CURandProvider::GetRandomStates(int device, uint32_t requiredLen) { return GetInstance()->p_GetRandomStates(device, requiredLen); } curandState* CURandProvider::GetRandomStates(int device, dim3 gridDim, dim3 blockDim) { return GetInstance()->p_GetRandomStates(device, gridDim, blockDim); } CURandProvider* CURandProvider::GetInstance() { if (s_instance == NULL) s_instance = new CURandProvider(); return s_instance; } __global__ void cudaInitRandoms(curandState *state, uint64_t seed, uint32_t maxId) { uint32_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= maxId) return; curand_init(seed, id, 0, state + id); } curandState* CURandProvider::p_GetRandomStates(int device, uint32_t requiredLen) { CURandDeviceInfo &devInfo = m_buffers[device]; if (devInfo.ArrayLen >= requiredLen) return devInfo.States; bool tryPreserve = devInfo.States != NULL; curandState *newBuff = NULL; cudaError_t err = cudaMalloc(&newBuff, requiredLen * sizeof(curandState)); if (tryPreserve && err != cudaSuccess) { // Free the old buffer I suppose. This means that a full re-init will need // to be performed cudaFree(devInfo.States); devInfo.ArrayLen = 0; devInfo.States = NULL; tryPreserve = false; err = cudaMalloc(&newBuff, requiredLen * sizeof(curandState)); } if (err != cudaSuccess) throw runtime_error("Unable to allocate random buffer large enough to satisfy request."); uint32_t offset = 0; if (tryPreserve) { cudaMemcpy(newBuff, devInfo.States, devInfo.ArrayLen * sizeof(curandState), cudaMemcpyDeviceToDevice); cudaFree(devInfo.States); offset = devInfo.ArrayLen; } devInfo.ArrayLen = requiredLen; devInfo.States = newBuff; // Initialize all of the new random states cudaInitRandoms <<<round_up(requiredLen - offset, 128), 128>>> (devInfo.States + offset, time(NULL), requiredLen - offset); return devInfo.States; } curandState* CURandProvider::p_GetRandomStates(int device, dim3 gridDim, dim3 blockDim) { uint32_t reqLen = (gridDim.z * blockDim.z) * (gridDim.y * blockDim.y) * (gridDim.x * blockDim.x); return p_GetRandomStates(device, reqLen); }
3fdbf8be94e13c1f8dffc93bb280758e4e39ae4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdio.h> #include<stdlib.h> #include <math.h> #include <Windows.h> #include <time.h> #include <assert.h> #define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}} typedef float TIMER_T; #define USE_CPU_TIMER 1 #define USE_GPU_TIMER 1 #define IN #define OUT #define INOUT #if USE_CPU_TIMER == 1 __int64 start, freq, end; #define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); } #define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); } #else #define CHECK_TIME_START #define CHECK_TIME_END(a) #endif #if USE_GPU_TIMER == 1 hipEvent_t cuda_timer_start, cuda_timer_stop; #define CUDA_STREAM_0 (0) void create_device_timer() { CUDA_CALL(hipEventCreate(&cuda_timer_start)); CUDA_CALL(hipEventCreate(&cuda_timer_stop)); } void destroy_device_timer() { CUDA_CALL(hipEventDestroy(cuda_timer_start)); CUDA_CALL(hipEventDestroy(cuda_timer_stop)); } inline void start_device_timer() { hipEventRecord(cuda_timer_start, CUDA_STREAM_0); } inline TIMER_T stop_device_timer() { TIMER_T ms; hipEventRecord(cuda_timer_stop, CUDA_STREAM_0); hipEventSynchronize(cuda_timer_stop); hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop); return ms; } #define CHECK_TIME_INIT_GPU() { create_device_timer(); } #define CHECK_TIME_START_GPU() { start_device_timer(); } #define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); } #define CHECK_TIME_DEST_GPU() { destroy_device_timer(); } #else #define CHECK_TIME_INIT_GPU() #define CHECK_TIME_START_GPU() #define CHECK_TIME_END_GPU(a) #define CHECK_TIME_DEST_GPU() #endif #define N_SIZE (1 << 3) // #define NF_SIZE (1 << 2) // Nf #define BLOCK_SIZE (1 << 3) // CUDA thread block #define BLOCK_WIDTH (1 << 3) #define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH) #define N_ITERATION (1 << 0) // TIMER_T compute_time = 0; TIMER_T device_time = 0; int N; int Nf; int *h_ArrayElements; int *h_SumOfArrayElements_CPU; int *h_SumOfArrayElements_GPU; hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // index - Nf index + Nf // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Sum_n_elements_Kernel(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) { /*Todo*/ int idx = blockDim.x * blockIdx.x + threadIdx.x; int sum = d_ArrayElements[Nf-1]; for (int x = 1; x <= idx; x++) { if ((Nf-1 - x) < 0 && (Nf-1 + x) > N) { continue; } else if ((Nf-1 - x) < 0) { sum += d_ArrayElements[Nf-1 + x]; } else if ((Nf-1 + x) > N) { sum += d_ArrayElements[Nf-1 - x]; } else { sum += d_ArrayElements[Nf-1 - x] + d_ArrayElements[Nf-1 + x]; } } d_SumOfArrayElements[idx] = sum; printf("%d ", sum); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // index - Nf index + Nf C // GPU kernel // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int 
*p_SumOfElements_CPU, int Nf) { /*Todo*/ int sum; printf("%d\n", N); for (int i = 0; i < N; i++) { sum = p_ArrayElements[Nf-1]; for (int x = 1; x <= i; x++) { if ((Nf-1 - x) < 0 && (Nf-1 + x) > _msize(p_ArrayElements) / sizeof(int)) { continue; } else if ((Nf-1 - x) < 0) { sum += p_ArrayElements[Nf-1 + x]; } else if ((Nf-1 + x) > _msize(p_ArrayElements) / sizeof(int)) { sum += p_ArrayElements[Nf-1 - x]; } else { sum += p_ArrayElements[Nf-1 - x] + p_ArrayElements[Nf-1 + x]; } } p_SumOfElements_CPU[i] = sum; printf("%d ", sum); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // bin // 4 , 4 Nf , N int // -100 ~ 100 // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void read_bin_file() { printf("***Binary File Read Start!!\n"); FILE *fp = fopen("gen.bin", "rb"); fread(&N, sizeof(int), 1, fp); fread(&Nf, sizeof(int), 1, fp); h_ArrayElements = (int *)malloc(N * sizeof(int)); h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int)); h_SumOfArrayElements_GPU = (int *)malloc(N * sizeof(int)); fread(h_ArrayElements, sizeof(int), N, fp); fclose(fp); printf("***Binary File Read End!!\n\n"); } void init_bin_file(IN int n, IN int nf) { printf("***Binary File Create Start!!\n"); srand((unsigned)time(NULL)); FILE *fp = fopen("gen.bin", "wb"); fwrite(&n, sizeof(int), 1, fp); fwrite(&nf, sizeof(int), 1, fp); int i, input; for (i = 0; i < n; i++) { input = (int)((float)rand() / RAND_MAX * 200 - 100); fwrite(&input, sizeof(int), 1, fp); } fclose(fp); printf("***Binary File Create End!!\n\n"); } int main() { int i; init_bin_file(N_SIZE, NF_SIZE); read_bin_file(); TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f; printf("%d -> %d\n", Nf, h_ArrayElements[Nf-1]); printf("%d & %d\n", h_ArrayElements[Nf-2], h_ArrayElements[Nf]); printf("%d & %d\n", h_ArrayElements[Nf -3], h_ArrayElements[Nf+1]); for (i = 0; i < N_ITERATION; i++) { CHECK_TIME_START; Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf); CHECK_TIME_END(compute_time); CPU_time += compute_time; printf("\n"); Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU, Nf); GPU_time_NO_SHARED += device_time; } for (i = 0; i < N; i++) { if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU[i]) { printf("%d : CPU : %d,\tGPU : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU[i]); break; } } if (i == N) printf("***Kernel execution Success!!\n\n"); printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION); printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION); free(h_ArrayElements); free(h_SumOfArrayElements_CPU); free(h_SumOfArrayElements_GPU); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf) { hipError_t cudaStatus; /*Todo*/ cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } int* p_ArrayElements2, * p_SumOfElements_GPU2; size_t size; size = N * sizeof(int); CUDA_CALL(hipMalloc(&p_ArrayElements2, size)) CUDA_CALL(hipMemcpy(p_ArrayElements2, p_ArrayElements, size, hipMemcpyHostToDevice)) CUDA_CALL(hipMalloc(&p_SumOfElements_GPU2, size)) // // Assume that width and height are multiples of BLOCK SIZE. // // (=32) ( ) dim3 dimBlock(BLOCK_SIZE); // dimblock(block_size, 1, 1) 32 // . blocksize . // , .x == dim3 dimGrid(N_SIZE / dimBlock.x); //, /(=32) Sum_n_elements_Kernel << < dimGrid, dimBlock >> > (p_ArrayElements2, p_SumOfElements_GPU2,N, Nf); CUDA_CALL(hipGetLastError()) // // hipDeviceSynchronize waits for the kernel to finish, and returns // // any errors encountered during the launch. CUDA_CALL(hipDeviceSynchronize()) CUDA_CALL(hipMemcpy(p_SumOfElements_GPU, p_SumOfElements_GPU2, size, hipMemcpyDeviceToHost)) CHECK_TIME_DEST_GPU(); Error: hipFree(p_ArrayElements2); hipFree(p_SumOfElements_GPU2); return cudaStatus; }
3fdbf8be94e13c1f8dffc93bb280758e4e39ae4f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdio.h> #include<stdlib.h> #include <math.h> #include <Windows.h> #include <time.h> #include <assert.h> #define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}} typedef float TIMER_T; #define USE_CPU_TIMER 1 #define USE_GPU_TIMER 1 #define IN #define OUT #define INOUT #if USE_CPU_TIMER == 1 __int64 start, freq, end; #define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); } #define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); } #else #define CHECK_TIME_START #define CHECK_TIME_END(a) #endif #if USE_GPU_TIMER == 1 cudaEvent_t cuda_timer_start, cuda_timer_stop; #define CUDA_STREAM_0 (0) void create_device_timer() { CUDA_CALL(cudaEventCreate(&cuda_timer_start)); CUDA_CALL(cudaEventCreate(&cuda_timer_stop)); } void destroy_device_timer() { CUDA_CALL(cudaEventDestroy(cuda_timer_start)); CUDA_CALL(cudaEventDestroy(cuda_timer_stop)); } inline void start_device_timer() { cudaEventRecord(cuda_timer_start, CUDA_STREAM_0); } inline TIMER_T stop_device_timer() { TIMER_T ms; cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0); cudaEventSynchronize(cuda_timer_stop); cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop); return ms; } #define CHECK_TIME_INIT_GPU() { create_device_timer(); } #define CHECK_TIME_START_GPU() { start_device_timer(); } #define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); } #define CHECK_TIME_DEST_GPU() { destroy_device_timer(); } #else #define CHECK_TIME_INIT_GPU() #define CHECK_TIME_START_GPU() #define CHECK_TIME_END_GPU(a) #define CHECK_TIME_DEST_GPU() #endif #define N_SIZE (1 << 3) // 전체 데이터 사이즈 #define NF_SIZE (1 << 2) // Nf 크기 #define BLOCK_SIZE (1 << 3) // CUDA 커널 thread block 사이즈 #define BLOCK_WIDTH (1 << 3) #define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH) #define N_ITERATION (1 << 0) // 실험 반복 횟수 TIMER_T compute_time = 0; TIMER_T device_time = 0; int N; int Nf; int *h_ArrayElements; int *h_SumOfArrayElements_CPU; int *h_SumOfArrayElements_GPU; cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // 배열의 index - Nf 부터 index + Nf 데이터 까지의 합을 계산하는 커널 코드 // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Sum_n_elements_Kernel(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) { /*Todo*/ int idx = blockDim.x * blockIdx.x + threadIdx.x; int sum = d_ArrayElements[Nf-1]; for (int x = 1; x <= idx; x++) { if ((Nf-1 - x) < 0 && (Nf-1 + x) > N) { continue; } else if ((Nf-1 - x) < 0) { sum += d_ArrayElements[Nf-1 + x]; } else if ((Nf-1 + x) > N) { sum += d_ArrayElements[Nf-1 - x]; } else { sum += d_ArrayElements[Nf-1 - x] + d_ArrayElements[Nf-1 + x]; } } d_SumOfArrayElements[idx] = sum; printf("%d ", sum); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // 배열의 index - Nf 부터 index + Nf 데이터 까지의 합을 계산하는 C 코드 // GPU kernel의 결과와 비교를 통해 옳은 계산을 하였는지 판단하는 데이터로 활용 // 
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) { /*Todo*/ int sum; printf("%d\n", N); for (int i = 0; i < N; i++) { sum = p_ArrayElements[Nf-1]; for (int x = 1; x <= i; x++) { if ((Nf-1 - x) < 0 && (Nf-1 + x) > _msize(p_ArrayElements) / sizeof(int)) { continue; } else if ((Nf-1 - x) < 0) { sum += p_ArrayElements[Nf-1 + x]; } else if ((Nf-1 + x) > _msize(p_ArrayElements) / sizeof(int)) { sum += p_ArrayElements[Nf-1 - x]; } else { sum += p_ArrayElements[Nf-1 - x] + p_ArrayElements[Nf-1 + x]; } } p_SumOfElements_CPU[i] = sum; printf("%d ", sum); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // 주어진 bin 파일을 읽는 코드 // 첫 4바이트는 전체 데이터의 개수, 다음 4바이트는 Nf의 크기, 그 이후 N개의 int형 데이터가 저장 // 데이터는 -100 ~ 100 까지의 범위 안의 정수 // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void read_bin_file() { printf("***Binary File Read Start!!\n"); FILE *fp = fopen("gen.bin", "rb"); fread(&N, sizeof(int), 1, fp); fread(&Nf, sizeof(int), 1, fp); h_ArrayElements = (int *)malloc(N * sizeof(int)); h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int)); h_SumOfArrayElements_GPU = (int *)malloc(N * sizeof(int)); fread(h_ArrayElements, sizeof(int), N, fp); fclose(fp); printf("***Binary File Read End!!\n\n"); } void init_bin_file(IN int n, IN int nf) { printf("***Binary File Create Start!!\n"); srand((unsigned)time(NULL)); FILE *fp = fopen("gen.bin", "wb"); fwrite(&n, sizeof(int), 1, fp); fwrite(&nf, sizeof(int), 1, fp); int i, input; for (i = 0; i < n; i++) { input = (int)((float)rand() / RAND_MAX * 200 - 100); fwrite(&input, sizeof(int), 1, fp); } fclose(fp); printf("***Binary File Create End!!\n\n"); } int main() { int i; init_bin_file(N_SIZE, NF_SIZE); read_bin_file(); TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f; printf("%d -> %d\n", Nf, h_ArrayElements[Nf-1]); printf("%d & %d\n", h_ArrayElements[Nf-2], h_ArrayElements[Nf]); printf("%d & %d\n", h_ArrayElements[Nf -3], h_ArrayElements[Nf+1]); for (i = 0; i < N_ITERATION; i++) { CHECK_TIME_START; Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf); CHECK_TIME_END(compute_time); CPU_time += compute_time; printf("\n"); Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU, Nf); GPU_time_NO_SHARED += device_time; } for (i = 0; i < N; i++) { if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU[i]) { printf("%d : CPU : %d,\tGPU : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU[i]); break; } } if (i == N) printf("***Kernel execution Success!!\n\n"); printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION); printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION); free(h_ArrayElements); free(h_SumOfArrayElements_CPU); free(h_SumOfArrayElements_GPU); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // 커널을 실행하기 전 필요한 자료들 준비 및 커널을 실행할 디바이스를 설정 // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf) { cudaError_t cudaStatus; /*Todo*/ cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); goto Error; } int* p_ArrayElements2, * p_SumOfElements_GPU2; size_t size; size = N * sizeof(int); CUDA_CALL(cudaMalloc(&p_ArrayElements2, size)) CUDA_CALL(cudaMemcpy(p_ArrayElements2, p_ArrayElements, size, cudaMemcpyHostToDevice)) CUDA_CALL(cudaMalloc(&p_SumOfElements_GPU2, size)) // // Assume that width and height are multiples of BLOCK SIZE. //쓰레드 결정 하는 데 //블럭 안에 쓰레드가 블럭사이즈(=32)만큼 존재 (쓰레드가 블럭수만큼 생성됨) dim3 dimBlock(BLOCK_SIZE); // dimblock(block_size, 1, 1) 블록 안에 32개의 쓰레드가 존재 //블럭의 총 갯수. 블록 하나당 쓰레드가 blocksize만큼 존재. //엘리먼트 사이즈가 총 쓰레드 개수, 딤블록.x == 블록의 개수 dim3 dimGrid(N_SIZE / dimBlock.x); //즉, 블록의 총갯수는 엘리먼트사이즈/딤블록(=32) Sum_n_elements_Kernel << < dimGrid, dimBlock >> > (p_ArrayElements2, p_SumOfElements_GPU2,N, Nf); CUDA_CALL(cudaGetLastError()) // // cudaDeviceSynchronize waits for the kernel to finish, and returns // // any errors encountered during the launch. CUDA_CALL(cudaDeviceSynchronize()) CUDA_CALL(cudaMemcpy(p_SumOfElements_GPU, p_SumOfElements_GPU2, size, cudaMemcpyDeviceToHost)) CHECK_TIME_DEST_GPU(); Error: cudaFree(p_ArrayElements2); cudaFree(p_SumOfElements_GPU2); return cudaStatus; }
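// Hedged sketch (not the author's solution): a kernel that literally implements
// the commented specification above -- for each element index, sum the values
// from index - Nf to index + Nf, skipping positions outside [0, N). The /*Todo*/
// versions in both files instead center every window on Nf-1 and loop up to the
// thread index, so this is an alternative reading of the exercise rather than a
// drop-in replacement.
__global__ void Sum_window_Kernel(const int *d_in, int *d_out, int N, int Nf) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N) return;
  int sum = 0;
  for (int k = idx - Nf; k <= idx + Nf; ++k) {
    if (k >= 0 && k < N) sum += d_in[k];     // clamp the window to the array
  }
  d_out[idx] = sum;
}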
f31ddd88b86204f85d59a4a9b9ce2ae54e329651.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/PReLU.cu" #else void THNN_(PReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight) { THCTensor_(resizeAs)(state, output, input); int64_t nOutputPlane = THCTensor_(numel)(state, weight); weight = THCTensor_(newContiguous)(state, weight); real *w = THCTensor_(data)(state, weight); if (nOutputPlane == 1) { THC_pointwiseApply2<real, real>(state, output, input, PReLUUpdateOutput<real>(w)); } else { int ndim = THCTensor_(nDimensionLegacyAll)(state, input); input = THCTensor_(newContiguous)(state, input); int n = THCTensor_(nElement)(state, input); if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane) THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1)); int mapSize = 1; for (int d = 2; d < ndim; d++) { mapSize *= input->size(d); } int nElemsPerSample = nOutputPlane * mapSize; hipLaunchKernelGGL(( preluForward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, output), THCTensor_(data)(state, input), w, n, nElemsPerSample, mapSize ); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); } THCTensor_(free)(state, weight); } void THNN_(PReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight) { THCUNN_check_nElement(state, input, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); int64_t nOutputPlane = THCTensor_(numel)(state, weight); weight = THCTensor_(newContiguous)(state, weight); real *w = THCTensor_(data)(state, weight); if (nOutputPlane == 1) { THC_pointwiseApply3<real, real, real>(state, gradInput, gradOutput, input, PReLUUpdateGradInput<real>(w)); } else { int ndim = THCTensor_(nDimensionLegacyAll)(state, input); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); int n = THCTensor_(nElement)(state, input); if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane) THError("Wrong number of input planes. 
Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1)); int mapSize = 1; for (int d = 2; d < ndim; d++) { mapSize *= input->size(d); } int nElemsPerSample = nOutputPlane * mapSize; hipLaunchKernelGGL(( preluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, input), w, THCTensor_(data)(state, gradOutput), n, nElemsPerSample, mapSize ); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } THCTensor_(free)(state, weight); } void THNN_(PReLU_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, THCTensor *gradWeight, accreal scale_) { real scale = ScalarConvert<accreal, real>::to(scale_); THCUNN_check_nElement(state, input, gradOutput); int64_t nOutputPlane = THCTensor_(numel)(state, weight); // use grad input for temporary storage, then call updateGradInput again if (nOutputPlane == 1) { THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<real>()); // introduces a sync point real sum = ScalarConvert<accreal, real>::to(THCTensor_(sumall)(state, gradInput)); real w = THCTensor_(get1d)(state, gradWeight, 0); THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale); // restore gradInput THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight); } else { int ndim = THCTensor_(nDimensionLegacyAll)(state, input); if (ndim == 1) { THC_pointwiseApply3<real, real, real>(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<real>(scale)); } else { THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParameters<real>(scale)); THCTensor *gradWeightBuf = THCTensor_(new)(state); THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight); if (ndim == 2) { THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 0); THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf); } else { THCTensor *sumbuf = THCTensor_(new)(state); THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput); int64_t size3 = 1; for (int d = 2; d < ndim; d++) { size3 *= input->size(d); } THCTensor_(resize3d)(state, buffer, input->size(0), nOutputPlane, size3); THCTensor_(resize2d)(state, sumbuf, input->size(0), nOutputPlane); THCTensor_(sum)(state, sumbuf, buffer, 2, 0); THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 0); THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf); THCTensor_(free)(state, buffer); THCTensor_(free)(state, sumbuf); } THCTensor_(free)(state, gradWeightBuf); // restore gradInput THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight); } } } #endif
f31ddd88b86204f85d59a4a9b9ce2ae54e329651.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/PReLU.cu" #else void THNN_(PReLU_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight) { THCTensor_(resizeAs)(state, output, input); int64_t nOutputPlane = THCTensor_(numel)(state, weight); weight = THCTensor_(newContiguous)(state, weight); real *w = THCTensor_(data)(state, weight); if (nOutputPlane == 1) { THC_pointwiseApply2<real, real>(state, output, input, PReLUUpdateOutput<real>(w)); } else { int ndim = THCTensor_(nDimensionLegacyAll)(state, input); input = THCTensor_(newContiguous)(state, input); int n = THCTensor_(nElement)(state, input); if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane) THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1)); int mapSize = 1; for (int d = 2; d < ndim; d++) { mapSize *= input->size(d); } int nElemsPerSample = nOutputPlane * mapSize; preluForward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), w, n, nElemsPerSample, mapSize ); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); } THCTensor_(free)(state, weight); } void THNN_(PReLU_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight) { THCUNN_check_nElement(state, input, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); int64_t nOutputPlane = THCTensor_(numel)(state, weight); weight = THCTensor_(newContiguous)(state, weight); real *w = THCTensor_(data)(state, weight); if (nOutputPlane == 1) { THC_pointwiseApply3<real, real, real>(state, gradInput, gradOutput, input, PReLUUpdateGradInput<real>(w)); } else { int ndim = THCTensor_(nDimensionLegacyAll)(state, input); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); int n = THCTensor_(nElement)(state, input); if (THTensor_sizeLegacyNoScalars(input, ndim > 1) != nOutputPlane) THError("Wrong number of input planes. 
Expected %d but got %d.", nOutputPlane, THTensor_sizeLegacyNoScalars(input, ndim > 1)); int mapSize = 1; for (int d = 2; d < ndim; d++) { mapSize *= input->size(d); } int nElemsPerSample = nOutputPlane * mapSize; preluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, input), w, THCTensor_(data)(state, gradOutput), n, nElemsPerSample, mapSize ); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } THCTensor_(free)(state, weight); } void THNN_(PReLU_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, THCTensor *gradWeight, accreal scale_) { real scale = ScalarConvert<accreal, real>::to(scale_); THCUNN_check_nElement(state, input, gradOutput); int64_t nOutputPlane = THCTensor_(numel)(state, weight); // use grad input for temporary storage, then call updateGradInput again if (nOutputPlane == 1) { THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<real>()); // introduces a sync point real sum = ScalarConvert<accreal, real>::to(THCTensor_(sumall)(state, gradInput)); real w = THCTensor_(get1d)(state, gradWeight, 0); THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale); // restore gradInput THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight); } else { int ndim = THCTensor_(nDimensionLegacyAll)(state, input); if (ndim == 1) { THC_pointwiseApply3<real, real, real>(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<real>(scale)); } else { THC_pointwiseApply3<real, real, real>(state, gradInput, input, gradOutput, PReLUAccGradParameters<real>(scale)); THCTensor *gradWeightBuf = THCTensor_(new)(state); THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight); if (ndim == 2) { THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 0); THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf); } else { THCTensor *sumbuf = THCTensor_(new)(state); THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput); int64_t size3 = 1; for (int d = 2; d < ndim; d++) { size3 *= input->size(d); } THCTensor_(resize3d)(state, buffer, input->size(0), nOutputPlane, size3); THCTensor_(resize2d)(state, sumbuf, input->size(0), nOutputPlane); THCTensor_(sum)(state, sumbuf, buffer, 2, 0); THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 0); THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf); THCTensor_(free)(state, buffer); THCTensor_(free)(state, sumbuf); } THCTensor_(free)(state, gradWeightBuf); // restore gradInput THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight); } } } #endif
3bf126c82f24d3e74be47c1a242e78eb1fafb138.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix addition: P = alpha*M + beta*N. * Device code. */ #ifndef _MATRIXADD_KERNEL_H_ #define _MATRIXADD_KERNEL_H_ #include <stdio.h> #include "matrixadd.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix addition kernel thread specification __global__ void MatrixAddKernel(const float* Melems, const float alpha, const float* Nelems, const float beta, float* Pelems) { // ADD YOUR CODE HERE int index = threadIdx.x + blockIdx.x * blockDim.x; Pelems[index] = Melems[index] + Nelems[index]; } #endif // #ifndef _MATRIXADD_KERNEL_H_
3bf126c82f24d3e74be47c1a242e78eb1fafb138.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix addition: P = alpha*M + beta*N. * Device code. */ #ifndef _MATRIXADD_KERNEL_H_ #define _MATRIXADD_KERNEL_H_ #include <stdio.h> #include "matrixadd.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix addition kernel thread specification __global__ void MatrixAddKernel(const float* Melems, const float alpha, const float* Nelems, const float beta, float* Pelems) { // ADD YOUR CODE HERE int index = threadIdx.x + blockIdx.x * blockDim.x; Pelems[index] = Melems[index] + Nelems[index]; } #endif // #ifndef _MATRIXADD_KERNEL_H_
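Note that MatrixAddKernel in the pair above computes a plain element-wise sum and never uses alpha or beta, even though the file header describes P = alpha*M + beta*N. A sketch of a variant that actually applies the scaling is shown below; the numElements parameter and the bounds check are assumptions, not part of the original skeleton.

// Sketch only (not part of the original file): applies the alpha/beta
// scaling described in the header comment, with a bounds check so the
// launch need not be an exact multiple of the block size.
// The extra numElements parameter is hypothetical.
__global__ void MatrixAddScaledKernel(const float* Melems, const float alpha,
                                      const float* Nelems, const float beta,
                                      float* Pelems, const int numElements)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < numElements)
        Pelems[index] = alpha * Melems[index] + beta * Nelems[index];
}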
700eda9166280ab8fa265e59a32e4f15ce419303.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <hip/hip_runtime.h> // Utilities and system includes //#include <helper_functions.h> #include <hip/hip_runtime.h> #include "../include/common.h" //#include <ctime.h> #include <time.h> #define KERNEL_RADIUS 8 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) __constant__ float c_Kernel[KERNEL_LENGTH]; texture<float,2,hipReadModeElementType> texRef; void setConvolutionKernel(float *h_Kernel) { hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *cc_Kernel, float *d_Dst, //float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Kernel[KERNEL_LENGTH]; __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; if(threadIdx.y*blockDim.x+threadIdx.x<KERNEL_LENGTH) {s_Kernel[threadIdx.y*blockDim.x+threadIdx.x]=cc_Kernel[threadIdx.y*blockDim.x+threadIdx.x]; }//{for (int i =0;i<KERNEL_LENGTH;i++) //s_Kernel[i] = cc_Kernel[i];} __syncthreads(); const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; //d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = tex2D(texRef,baseX+i*ROWS_BLOCKDIM_X,baseY);//d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? tex2D(texRef,baseX+i*ROWS_BLOCKDIM_X,baseY):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo //#pragma unroll // printf("%d,%d\n",ROWS_HALO_STEPS,ROWS_HALO_STEPS); for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
tex2D(texRef,baseX+i*ROWS_BLOCKDIM_X,baseY):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += s_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *cc_Kernel, float *d_Dst, // float *d_Src, int imageW, int imageH ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0, cc_Kernel, d_Dst, // d_Src, imageW, imageH, imageW ); getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW ); getLastCudaError("convolutionColumnsKernel() execution failed\n"); } void convolutionRowCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = x + k; if (d >= 0 && d < imageW) sum += h_Src[y * imageW + d] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = y + k; if (d >= 0 && d < imageH) sum += h_Src[d * imageW + x] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input, *d_Output, *d_Buffer; const int imageW = 3072; const int imageH = 3072; const int iterations = 16; struct timespec t1,t2; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s //findCudaDevice(argc, (const char **)argv); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(hipMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); float *cc_Kernel; hipMalloc((void **)&cc_Kernel, KERNEL_LENGTH * 
sizeof(float)); // hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); hipMemcpy(cc_Kernel,h_Kernel,KERNEL_LENGTH * sizeof(float),hipMemcpyHostToDevice); setConvolutionKernel(h_Kernel); //checkCudaErrors(hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice)); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipArray* cuArray; hipMallocArray(&cuArray, &channelDesc, imageW, imageH); // Copy to device memory some data located at address h_data // in host memory hipMemcpyToArray(cuArray, 0, 0, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice); // Set texture reference parameters texRef.addressMode[0] = hipAddressModeWrap; texRef.addressMode[1] = hipAddressModeWrap; texRef.filterMode = hipFilterModePoint; // Bind the array to the texture reference hipBindTextureToArray(texRef, cuArray, channelDesc); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); //clock_gettime(CLOCK_MONOTONIC,&t1); } if(i==1) clock_gettime(CLOCK_MONOTONIC,&t1); convolutionRowsGPU( cc_Kernel, d_Buffer, //d_Input, imageW, imageH ); checkCudaErrors(hipDeviceSynchronize()); if(i==1) clock_gettime(CLOCK_MONOTONIC,&t2); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH ); } checkCudaErrors(hipDeviceSynchronize()); // clock_gettime(CLOCK_MONOTONIC,&t2); double gpuTime = ((t2.tv_sec-t1.tv_sec)+ (t2.tv_nsec-t1.tv_nsec)/1.e9);/// (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(hipMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), hipMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(hipFree(d_Buffer)); checkCudaErrors(hipFree(d_Output)); checkCudaErrors(hipFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); hipDeviceReset(); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
700eda9166280ab8fa265e59a32e4f15ce419303.cu
/* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <cuda_runtime.h> // Utilities and system includes //#include <helper_functions.h> #include <cuda.h> #include "../include/common.h" //#include <ctime.h> #include <time.h> #define KERNEL_RADIUS 8 #define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1) __constant__ float c_Kernel[KERNEL_LENGTH]; texture<float,2,cudaReadModeElementType> texRef; void setConvolutionKernel(float *h_Kernel) { cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *cc_Kernel, float *d_Dst, //float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Kernel[KERNEL_LENGTH]; __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; if(threadIdx.y*blockDim.x+threadIdx.x<KERNEL_LENGTH) {s_Kernel[threadIdx.y*blockDim.x+threadIdx.x]=cc_Kernel[threadIdx.y*blockDim.x+threadIdx.x]; }//{for (int i =0;i<KERNEL_LENGTH;i++) //s_Kernel[i] = cc_Kernel[i];} __syncthreads(); const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; //d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = tex2D(texRef,baseX+i*ROWS_BLOCKDIM_X,baseY);//d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? tex2D(texRef,baseX+i*ROWS_BLOCKDIM_X,baseY):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo //#pragma unroll // printf("%d,%d\n",ROWS_HALO_STEPS,ROWS_HALO_STEPS); for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
tex2D(texRef,baseX+i*ROWS_BLOCKDIM_X,baseY):0;//d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += s_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *cc_Kernel, float *d_Dst, // float *d_Src, int imageW, int imageH ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsKernel<<<blocks, threads>>>( cc_Kernel, d_Dst, // d_Src, imageW, imageH, imageW ); getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); convolutionColumnsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW ); getLastCudaError("convolutionColumnsKernel() execution failed\n"); } void convolutionRowCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = x + k; if (d >= 0 && d < imageW) sum += h_Src[y * imageW + d] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU( float *h_Dst, float *h_Src, float *h_Kernel, int imageW, int imageH, int kernelR ) { for (int y = 0; y < imageH; y++) for (int x = 0; x < imageW; x++) { float sum = 0; for (int k = -kernelR; k <= kernelR; k++) { int d = y + k; if (d >= 0 && d < imageH) sum += h_Src[d * imageW + x] * h_Kernel[kernelR - k]; } h_Dst[y * imageW + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input, *d_Output, *d_Buffer; const int imageW = 3072; const int imageH = 3072; const int iterations = 16; struct timespec t1,t2; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s //findCudaDevice(argc, (const char **)argv); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(cudaMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); float *cc_Kernel; cudaMalloc((void **)&cc_Kernel, KERNEL_LENGTH * sizeof(float)); // 
cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); cudaMemcpy(cc_Kernel,h_Kernel,KERNEL_LENGTH * sizeof(float),cudaMemcpyHostToDevice); setConvolutionKernel(h_Kernel); //checkCudaErrors(cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaArray* cuArray; cudaMallocArray(&cuArray, &channelDesc, imageW, imageH); // Copy to device memory some data located at address h_data // in host memory cudaMemcpyToArray(cuArray, 0, 0, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice); // Set texture reference parameters texRef.addressMode[0] = cudaAddressModeWrap; texRef.addressMode[1] = cudaAddressModeWrap; texRef.filterMode = cudaFilterModePoint; // Bind the array to the texture reference cudaBindTextureToArray(texRef, cuArray, channelDesc); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); //clock_gettime(CLOCK_MONOTONIC,&t1); } if(i==1) clock_gettime(CLOCK_MONOTONIC,&t1); convolutionRowsGPU( cc_Kernel, d_Buffer, //d_Input, imageW, imageH ); checkCudaErrors(cudaDeviceSynchronize()); if(i==1) clock_gettime(CLOCK_MONOTONIC,&t2); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH ); } checkCudaErrors(cudaDeviceSynchronize()); // clock_gettime(CLOCK_MONOTONIC,&t2); double gpuTime = ((t2.tv_sec-t1.tv_sec)+ (t2.tv_nsec-t1.tv_nsec)/1.e9);/// (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(cudaMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(cudaFree(d_Buffer)); checkCudaErrors(cudaFree(d_Output)); checkCudaErrors(cudaFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); cudaDeviceReset(); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
d2d6330021b82b2b602c0dba0c10f6032f2b37f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <iostream> #include <cstdio> #include <helper_cuda.h> #include <helper_string.h> #define MAX_DEPTH 16 #define INSERTION_SORT 32 //////////////////////////////////////////////////////////////////////////////// // Selection sort used when depth gets too big or the number of elements drops // below a threshold. //////////////////////////////////////////////////////////////////////////////// __device__ void selection_sort(unsigned int *data, int left, int right) { for (int i = left ; i <= right ; ++i) { unsigned min_val = data[i]; int min_idx = i; // Find the smallest value in the range [left, right]. for (int j = i+1 ; j <= right ; ++j) { unsigned val_j = data[j]; if (val_j < min_val) { min_idx = j; min_val = val_j; } } // Swap the values. if (i != min_idx) { data[min_idx] = data[i]; data[i] = min_val; } } } //////////////////////////////////////////////////////////////////////////////// // Very basic quicksort algorithm, recursively launching the next level. //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_simple_quicksort(unsigned int *data, int left, int right, int depth) { // If we're too deep or there are few elements left, we use an insertion sort... if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT) { selection_sort(data, left, right); return; } unsigned int *lptr = data+left; unsigned int *rptr = data+right; unsigned int pivot = data[(left+right)/2]; // Do the partitioning. while (lptr <= rptr) { // Find the next left- and right-hand values to swap unsigned int lval = *lptr; unsigned int rval = *rptr; // Move the left pointer as long as the pointed element is smaller than the pivot. while (lval < pivot) { lptr++; lval = *lptr; } // Move the right pointer as long as the pointed element is larger than the pivot. while (rval > pivot) { rptr--; rval = *rptr; } // If the swap points are valid, do the swap! if (lptr <= rptr) { *lptr++ = rval; *rptr-- = lval; } } // Now the recursive part int nright = rptr - data; int nleft = lptr - data; // Launch a new block to sort the left part. if (left < (rptr-data)) { hipStream_t s; hipStreamCreateWithFlags(&s, hipStreamNonBlocking); hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s , data, left, nright, depth+1); hipStreamDestroy(s); } // Launch a new block to sort the right part. if ((lptr-data) < right) { hipStream_t s1; hipStreamCreateWithFlags(&s1, hipStreamNonBlocking); hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s1 , data, nleft, right, depth+1); hipStreamDestroy(s1); } } //////////////////////////////////////////////////////////////////////////////// // Call the quicksort kernel from the host. //////////////////////////////////////////////////////////////////////////////// void run_qsort(unsigned int *data, unsigned int nitems) { // Prepare CDP for the max depth 'MAX_DEPTH'. 
checkCudaErrors(hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH)); // Launch on device int left = 0; int right = nitems-1; std::cout << "Launching kernel on the GPU" << std::endl; hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1) , 0, 0, data, left, right, 0); checkCudaErrors(hipDeviceSynchronize()); } //////////////////////////////////////////////////////////////////////////////// // Initialize data on the host. //////////////////////////////////////////////////////////////////////////////// void initialize_data(unsigned int *dst, unsigned int nitems) { // Fixed seed for illustration srand(2047); // Fill dst with random values for (unsigned i = 0 ; i < nitems ; i++) dst[i] = rand() % nitems ; } //////////////////////////////////////////////////////////////////////////////// // Verify the results. //////////////////////////////////////////////////////////////////////////////// void check_results(int n, unsigned int *results_d) { unsigned int *results_h = new unsigned[n]; checkCudaErrors(hipMemcpy(results_h, results_d, n*sizeof(unsigned), hipMemcpyDeviceToHost)); for (int i = 1 ; i < n ; ++i) if (results_h[i-1] > results_h[i]) { std::cout << "Invalid item[" << i-1 << "]: " << results_h[i-1] << " greater than " << results_h[i] << std::endl; exit(EXIT_FAILURE); } std::cout << "OK" << std::endl; delete[] results_h; } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { int num_items = 128; bool verbose = false; if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "h")) { std::cerr << "Usage: " << argv[0] << " num_items=<num_items>\twhere num_items is the number of items to sort" << std::endl; exit(EXIT_SUCCESS); } if (checkCmdLineFlag(argc, (const char **)argv, "v")) { verbose = true; } if (checkCmdLineFlag(argc, (const char **)argv, "num_items")) { num_items = getCmdLineArgumentInt(argc, (const char **)argv, "num_items"); if (num_items < 1) { std::cerr << "ERROR: num_items has to be greater than 1" << std::endl; exit(EXIT_FAILURE); } } // Get device properties int device_count = 0, device = -1; if(checkCmdLineFlag(argc, (const char **)argv, "device")) { device = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipDeviceProp_t properties; checkCudaErrors(hipGetDeviceProperties(&properties, device)); if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5)) { std::cout << "Running on GPU " << device << " (" << properties.name << ")" << std::endl; } else { std::cout << "ERROR: cdpsimpleQuicksort requires GPU devices with compute SM 3.5 or higher."<< std::endl; std::cout << "Current GPU device has compute SM" << properties.major <<"."<< properties.minor <<". Exiting..." << std::endl; exit(EXIT_FAILURE); } } else { checkCudaErrors(hipGetDeviceCount(&device_count)); for (int i = 0 ; i < device_count ; ++i) { hipDeviceProp_t properties; checkCudaErrors(hipGetDeviceProperties(&properties, i)); if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5)) { device = i; std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl; break; } std::cout << "GPU " << i << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl; } } if (device == -1) { std::cerr << "cdpSimpleQuicksort requires GPU devices with compute SM 3.5 or higher. Exiting..." 
<< std::endl; exit(EXIT_WAIVED); } hipSetDevice(device); // Create input data unsigned int *h_data = 0; unsigned int *d_data = 0; // Allocate CPU memory and initialize data. std::cout << "Initializing data:" << std::endl; h_data =(unsigned int *)malloc(num_items*sizeof(unsigned int)); initialize_data(h_data, num_items); if (verbose) { for (int i=0 ; i<num_items ; i++) std::cout << "Data [" << i << "]: " << h_data[i] << std::endl; } // Allocate GPU memory. checkCudaErrors(hipMalloc((void **)&d_data, num_items * sizeof(unsigned int))); checkCudaErrors(hipMemcpy(d_data, h_data, num_items * sizeof(unsigned int), hipMemcpyHostToDevice)); // Execute std::cout << "Running quicksort on " << num_items << " elements" << std::endl; run_qsort(d_data, num_items); // Check result std::cout << "Validating results: "; check_results(num_items, d_data); free(h_data); checkCudaErrors(hipFree(d_data)); exit(EXIT_SUCCESS); }
d2d6330021b82b2b602c0dba0c10f6032f2b37f0.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <iostream> #include <cstdio> #include <helper_cuda.h> #include <helper_string.h> #define MAX_DEPTH 16 #define INSERTION_SORT 32 //////////////////////////////////////////////////////////////////////////////// // Selection sort used when depth gets too big or the number of elements drops // below a threshold. //////////////////////////////////////////////////////////////////////////////// __device__ void selection_sort(unsigned int *data, int left, int right) { for (int i = left ; i <= right ; ++i) { unsigned min_val = data[i]; int min_idx = i; // Find the smallest value in the range [left, right]. for (int j = i+1 ; j <= right ; ++j) { unsigned val_j = data[j]; if (val_j < min_val) { min_idx = j; min_val = val_j; } } // Swap the values. if (i != min_idx) { data[min_idx] = data[i]; data[i] = min_val; } } } //////////////////////////////////////////////////////////////////////////////// // Very basic quicksort algorithm, recursively launching the next level. //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_simple_quicksort(unsigned int *data, int left, int right, int depth) { // If we're too deep or there are few elements left, we use an insertion sort... if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT) { selection_sort(data, left, right); return; } unsigned int *lptr = data+left; unsigned int *rptr = data+right; unsigned int pivot = data[(left+right)/2]; // Do the partitioning. while (lptr <= rptr) { // Find the next left- and right-hand values to swap unsigned int lval = *lptr; unsigned int rval = *rptr; // Move the left pointer as long as the pointed element is smaller than the pivot. while (lval < pivot) { lptr++; lval = *lptr; } // Move the right pointer as long as the pointed element is larger than the pivot. while (rval > pivot) { rptr--; rval = *rptr; } // If the swap points are valid, do the swap! if (lptr <= rptr) { *lptr++ = rval; *rptr-- = lval; } } // Now the recursive part int nright = rptr - data; int nleft = lptr - data; // Launch a new block to sort the left part. if (left < (rptr-data)) { cudaStream_t s; cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking); cdp_simple_quicksort<<< 1, 1, 0, s >>>(data, left, nright, depth+1); cudaStreamDestroy(s); } // Launch a new block to sort the right part. if ((lptr-data) < right) { cudaStream_t s1; cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking); cdp_simple_quicksort<<< 1, 1, 0, s1 >>>(data, nleft, right, depth+1); cudaStreamDestroy(s1); } } //////////////////////////////////////////////////////////////////////////////// // Call the quicksort kernel from the host. //////////////////////////////////////////////////////////////////////////////// void run_qsort(unsigned int *data, unsigned int nitems) { // Prepare CDP for the max depth 'MAX_DEPTH'. 
checkCudaErrors(cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH)); // Launch on device int left = 0; int right = nitems-1; std::cout << "Launching kernel on the GPU" << std::endl; cdp_simple_quicksort<<< 1, 1 >>>(data, left, right, 0); checkCudaErrors(cudaDeviceSynchronize()); } //////////////////////////////////////////////////////////////////////////////// // Initialize data on the host. //////////////////////////////////////////////////////////////////////////////// void initialize_data(unsigned int *dst, unsigned int nitems) { // Fixed seed for illustration srand(2047); // Fill dst with random values for (unsigned i = 0 ; i < nitems ; i++) dst[i] = rand() % nitems ; } //////////////////////////////////////////////////////////////////////////////// // Verify the results. //////////////////////////////////////////////////////////////////////////////// void check_results(int n, unsigned int *results_d) { unsigned int *results_h = new unsigned[n]; checkCudaErrors(cudaMemcpy(results_h, results_d, n*sizeof(unsigned), cudaMemcpyDeviceToHost)); for (int i = 1 ; i < n ; ++i) if (results_h[i-1] > results_h[i]) { std::cout << "Invalid item[" << i-1 << "]: " << results_h[i-1] << " greater than " << results_h[i] << std::endl; exit(EXIT_FAILURE); } std::cout << "OK" << std::endl; delete[] results_h; } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { int num_items = 128; bool verbose = false; if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "h")) { std::cerr << "Usage: " << argv[0] << " num_items=<num_items>\twhere num_items is the number of items to sort" << std::endl; exit(EXIT_SUCCESS); } if (checkCmdLineFlag(argc, (const char **)argv, "v")) { verbose = true; } if (checkCmdLineFlag(argc, (const char **)argv, "num_items")) { num_items = getCmdLineArgumentInt(argc, (const char **)argv, "num_items"); if (num_items < 1) { std::cerr << "ERROR: num_items has to be greater than 1" << std::endl; exit(EXIT_FAILURE); } } // Get device properties int device_count = 0, device = -1; if(checkCmdLineFlag(argc, (const char **)argv, "device")) { device = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaDeviceProp properties; checkCudaErrors(cudaGetDeviceProperties(&properties, device)); if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5)) { std::cout << "Running on GPU " << device << " (" << properties.name << ")" << std::endl; } else { std::cout << "ERROR: cdpsimpleQuicksort requires GPU devices with compute SM 3.5 or higher."<< std::endl; std::cout << "Current GPU device has compute SM" << properties.major <<"."<< properties.minor <<". Exiting..." << std::endl; exit(EXIT_FAILURE); } } else { checkCudaErrors(cudaGetDeviceCount(&device_count)); for (int i = 0 ; i < device_count ; ++i) { cudaDeviceProp properties; checkCudaErrors(cudaGetDeviceProperties(&properties, i)); if (properties.major > 3 || (properties.major == 3 && properties.minor >= 5)) { device = i; std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl; break; } std::cout << "GPU " << i << " (" << properties.name << ") does not support CUDA Dynamic Parallelism" << std::endl; } } if (device == -1) { std::cerr << "cdpSimpleQuicksort requires GPU devices with compute SM 3.5 or higher. Exiting..." 
<< std::endl; exit(EXIT_WAIVED); } cudaSetDevice(device); // Create input data unsigned int *h_data = 0; unsigned int *d_data = 0; // Allocate CPU memory and initialize data. std::cout << "Initializing data:" << std::endl; h_data =(unsigned int *)malloc(num_items*sizeof(unsigned int)); initialize_data(h_data, num_items); if (verbose) { for (int i=0 ; i<num_items ; i++) std::cout << "Data [" << i << "]: " << h_data[i] << std::endl; } // Allocate GPU memory. checkCudaErrors(cudaMalloc((void **)&d_data, num_items * sizeof(unsigned int))); checkCudaErrors(cudaMemcpy(d_data, h_data, num_items * sizeof(unsigned int), cudaMemcpyHostToDevice)); // Execute std::cout << "Running quicksort on " << num_items << " elements" << std::endl; run_qsort(d_data, num_items); // Check result std::cout << "Validating results: "; check_results(num_items, d_data); free(h_data); checkCudaErrors(cudaFree(d_data)); exit(EXIT_SUCCESS); }
e5b713b1bcda62ea7ad7d97da56cbeaf0007f7c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/remove.h> #include <hip/hip_cooperative_groups.h> typedef int mytype; const int test_dsize = 256; const int nTPB = 256; template <typename T> __device__ unsigned predicate_test(T data, T testval){ if (data == testval) return 0; return 1; } using namespace cooperative_groups; // assume dsize is divisbile by nTPB template <typename T> __global__ void my_remove_if(const T * __restrict__ idata, const T remove_val, T * __restrict__ odata, unsigned * __restrict__ idxs, const unsigned dsize){ __shared__ unsigned sidxs[nTPB]; auto g = this_thread_block(); auto gg = this_grid(); unsigned tidx = g.thread_rank(); unsigned gidx = tidx + nTPB*g.group_index().x; unsigned gridSize = g.size()*gridDim.x; // first use grid-stride loop to have each block do a prefix sum over data set for (unsigned i = gidx; i < dsize; i+=gridSize){ unsigned temp = predicate_test(idata[i], remove_val); sidxs[tidx] = temp; for (int j = 1; j < g.size(); j<<=1){ g.sync(); if (j <= tidx){ temp += sidxs[tidx-j];} g.sync(); if (j <= tidx){ sidxs[tidx] = temp;}} idxs[i] = temp; g.sync();} // grid-wide barrier gg.sync(); // then compute final index, and move input data to output location unsigned stride = 0; for (unsigned i = gidx; i < dsize; i+=gridSize){ T temp = idata[i]; if (predicate_test(temp, remove_val)){ unsigned my_idx = idxs[i]; for (unsigned j = 1; (j-1) < (g.group_index().x+(stride*gridDim.x)); j++) my_idx += idxs[j*nTPB-1]; odata[my_idx-1] = temp;} stride++;} } int main(){ // data setup mytype *d_idata, *d_odata, *h_data; unsigned *d_idxs; size_t tsize = ((size_t)test_dsize)*sizeof(mytype); h_data = (mytype *)malloc(tsize); hipMalloc(&d_idata, tsize); hipMalloc(&d_odata, tsize); hipMemset(d_odata, 0, tsize); hipMalloc(&d_idxs, test_dsize*sizeof(unsigned)); // check for support and device configuration // and calculate maximum grid size hipDeviceProp_t prop; hipError_t err = hipGetDeviceProperties(&prop, 0); if (err != hipSuccess) {printf("cuda error: %s\n", hipGetErrorString(err)); return 0;} if (prop.cooperativeLaunch == 0) {printf("cooperative launch not supported\n"); return 0;} int numSM = prop.multiProcessorCount; printf("number of SMs = %d\n", numSM); int numBlkPerSM; hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlkPerSM, my_remove_if<mytype>, nTPB, 0); printf("number of blocks per SM = %d\n", numBlkPerSM); // test 1: no remove values for (int i = 0; i < test_dsize; i++) h_data[i] = i; hipMemcpy(d_idata, h_data, tsize, hipMemcpyHostToDevice); hipStream_t str; hipStreamCreate(&str); mytype remove_val = -1; unsigned ds = test_dsize; void *args[] = {(void *)&d_idata, (void *)&remove_val, (void *)&d_odata, (void *)&d_idxs, (void *)&ds}; dim3 grid(numBlkPerSM*numSM); dim3 block(nTPB); hipLaunchCooperativeKernel((void *)my_remove_if<mytype>, grid, block, args, 0, str); err = hipMemcpy(h_data, d_odata, tsize, hipMemcpyDeviceToHost); if (err != hipSuccess) {printf("cuda error: %s\n", hipGetErrorString(err)); return 0;} //validate for (int i = 0; i < test_dsize; i++) if (h_data[i] != i){printf("mismatch 1 at %d, was: %d, should be: %d\n", i, h_data[i], i); return 1;} // test 2: with remove values int val = 0; for (int i = 0; i < test_dsize; i++){ if ((rand()/(float)RAND_MAX) > 0.5) h_data[i] = val++; else h_data[i] = -1;} thrust::device_vector<mytype> t_data(h_data, h_data+test_dsize); hipMemcpy(d_idata, h_data, 
tsize, hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchCooperativeKernel((void *)my_remove_if<mytype>, grid, block, args, 0, str); hipEventRecord(stop); float et; hipMemcpy(h_data, d_odata, tsize, hipMemcpyDeviceToHost); hipEventElapsedTime(&et, start, stop); //validate for (int i = 0; i < val; i++) if (h_data[i] != i){printf("mismatch 2 at %d, was: %d, should be: %d\n", i, h_data[i], i); return 1;} printf("kernel time: %fms\n", et); hipEventRecord(start); thrust::remove(t_data.begin(), t_data.end(), -1); hipEventRecord(stop); thrust::host_vector<mytype> th_data = t_data; // validate for (int i = 0; i < val; i++) if (h_data[i] != th_data[i]){printf("mismatch 3 at %d, was: %d, should be: %d\n", i, th_data[i], h_data[i]); return 1;} hipEventElapsedTime(&et, start, stop); printf("thrust time: %fms\n", et); return 0; }
e5b713b1bcda62ea7ad7d97da56cbeaf0007f7c8.cu
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/remove.h>
#include <cooperative_groups.h>

typedef int mytype;

const int test_dsize = 256;
const int nTPB = 256;

template <typename T>
__device__ unsigned predicate_test(T data, T testval){
  if (data == testval) return 0;
  return 1;
}

using namespace cooperative_groups;

// assume dsize is divisible by nTPB
template <typename T>
__global__ void my_remove_if(const T * __restrict__ idata, const T remove_val, T * __restrict__ odata, unsigned * __restrict__ idxs, const unsigned dsize){

  __shared__ unsigned sidxs[nTPB];
  auto g  = this_thread_block();
  auto gg = this_grid();
  unsigned tidx = g.thread_rank();
  unsigned gidx = tidx + nTPB*g.group_index().x;
  unsigned gridSize = g.size()*gridDim.x;
  // first use grid-stride loop to have each block do a prefix sum over data set
  for (unsigned i = gidx; i < dsize; i+=gridSize){
    unsigned temp = predicate_test(idata[i], remove_val);
    sidxs[tidx] = temp;
    for (int j = 1; j < g.size(); j<<=1){
      g.sync();
      if (j <= tidx){ temp += sidxs[tidx-j];}
      g.sync();
      if (j <= tidx){ sidxs[tidx] = temp;}}
    idxs[i] = temp;
    g.sync();}
  // grid-wide barrier
  gg.sync();
  // then compute final index, and move input data to output location
  unsigned stride = 0;
  for (unsigned i = gidx; i < dsize; i+=gridSize){
    T temp = idata[i];
    if (predicate_test(temp, remove_val)){
      unsigned my_idx = idxs[i];
      for (unsigned j = 1; (j-1) < (g.group_index().x+(stride*gridDim.x)); j++) my_idx += idxs[j*nTPB-1];
      odata[my_idx-1] = temp;}
    stride++;}
}

int main(){

  // data setup
  mytype *d_idata, *d_odata, *h_data;
  unsigned *d_idxs;
  size_t tsize = ((size_t)test_dsize)*sizeof(mytype);
  h_data = (mytype *)malloc(tsize);
  cudaMalloc(&d_idata, tsize);
  cudaMalloc(&d_odata, tsize);
  cudaMemset(d_odata, 0, tsize);
  cudaMalloc(&d_idxs, test_dsize*sizeof(unsigned));

  // check for support and device configuration
  // and calculate maximum grid size
  cudaDeviceProp prop;
  cudaError_t err = cudaGetDeviceProperties(&prop, 0);
  if (err != cudaSuccess) {printf("cuda error: %s\n", cudaGetErrorString(err)); return 0;}
  if (prop.cooperativeLaunch == 0) {printf("cooperative launch not supported\n"); return 0;}
  int numSM = prop.multiProcessorCount;
  printf("number of SMs = %d\n", numSM);
  int numBlkPerSM;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlkPerSM, my_remove_if<mytype>, nTPB, 0);
  printf("number of blocks per SM = %d\n", numBlkPerSM);

  // test 1: no remove values
  for (int i = 0; i < test_dsize; i++) h_data[i] = i;
  cudaMemcpy(d_idata, h_data, tsize, cudaMemcpyHostToDevice);
  cudaStream_t str;
  cudaStreamCreate(&str);
  mytype remove_val = -1;
  unsigned ds = test_dsize;
  void *args[] = {(void *)&d_idata, (void *)&remove_val, (void *)&d_odata, (void *)&d_idxs, (void *)&ds};
  dim3 grid(numBlkPerSM*numSM);
  dim3 block(nTPB);
  cudaLaunchCooperativeKernel((void *)my_remove_if<mytype>, grid, block, args, 0, str);
  err = cudaMemcpy(h_data, d_odata, tsize, cudaMemcpyDeviceToHost);
  if (err != cudaSuccess) {printf("cuda error: %s\n", cudaGetErrorString(err)); return 0;}
  // validate
  for (int i = 0; i < test_dsize; i++)
    if (h_data[i] != i){printf("mismatch 1 at %d, was: %d, should be: %d\n", i, h_data[i], i); return 1;}

  // test 2: with remove values
  int val = 0;
  for (int i = 0; i < test_dsize; i++){
    if ((rand()/(float)RAND_MAX) > 0.5) h_data[i] = val++;
    else h_data[i] = -1;}
  thrust::device_vector<mytype> t_data(h_data, h_data+test_dsize);
  cudaMemcpy(d_idata, h_data, tsize, cudaMemcpyHostToDevice);
  cudaEvent_t start, stop;
  cudaEventCreate(&start); cudaEventCreate(&stop);
  cudaEventRecord(start);
  cudaLaunchCooperativeKernel((void *)my_remove_if<mytype>, grid, block, args, 0, str);
  cudaEventRecord(stop);
  float et;
  cudaMemcpy(h_data, d_odata, tsize, cudaMemcpyDeviceToHost);
  cudaEventElapsedTime(&et, start, stop);
  // validate
  for (int i = 0; i < val; i++)
    if (h_data[i] != i){printf("mismatch 2 at %d, was: %d, should be: %d\n", i, h_data[i], i); return 1;}
  printf("kernel time: %fms\n", et);
  cudaEventRecord(start);
  thrust::remove(t_data.begin(), t_data.end(), -1);
  cudaEventRecord(stop);
  thrust::host_vector<mytype> th_data = t_data;
  // validate
  for (int i = 0; i < val; i++)
    if (h_data[i] != th_data[i]){printf("mismatch 3 at %d, was: %d, should be: %d\n", i, th_data[i], h_data[i]); return 1;}
  cudaEventElapsedTime(&et, start, stop);
  printf("thrust time: %fms\n", et);
  return 0;
}
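// ---------------------------------------------------------------------------
// Editor's note (not part of the original file): the kernel above performs
// stream compaction into a separate output buffer, which is the same shape as
// thrust::copy_if with a "keep" predicate.  The standalone sketch below is a
// minimal, hypothetical cross-check of that behaviour; the names `is_kept`,
// `d_in`, and `d_out` are illustrative and do not appear in the original code.
// Grid-wide sync (gg.sync()) in the file above generally needs relocatable
// device code on a cc 6.0+ GPU, e.g. `nvcc -arch=sm_60 -rdc=true` (assumption
// about the build, not stated in the original).
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <cstdio>

struct is_kept {
  int remove_val;
  __host__ __device__ bool operator()(int x) const { return x != remove_val; }
};

int main(){
  int h_in[8] = {0, -1, 1, 2, -1, 3, -1, 4};
  thrust::device_vector<int> d_in(h_in, h_in + 8);
  thrust::device_vector<int> d_out(8, 0);
  // keep everything that is not the remove value, preserving order
  auto end = thrust::copy_if(d_in.begin(), d_in.end(), d_out.begin(), is_kept{-1});
  for (auto it = d_out.begin(); it != end; ++it)
    printf("%d ", (int)*it);   // expected output: 0 1 2 3 4
  printf("\n");
  return 0;
}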
3221bd2d52d228f0794fa60d5b65ccdeb28181f1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void getValueChar(char *outdata, char *indata)
{
    outdata[0] = indata[0] + 3;
}
3221bd2d52d228f0794fa60d5b65ccdeb28181f1.cu
#include "includes.h" __global__ void getValueChar(char *outdata, char *indata) { outdata[0] = indata[0] + 3; }
dd8dd4ed9832b4e813081015b2d1ad2867a329e0.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <complex>
#include <thrust/complex.h>
#include <hip/hip_complex.h>

using namespace std;

__global__ void fft(thrust::complex<double> *, thrust::complex<double> *);

int main()
{
    const int N = 10;

    // An array of complex numbers per the specification
    complex<double> data[N] = {3.6 + 2.6 * 1i, 2.9 + 6.3 * 1i, 5.6 + 4.0 * 1i, 4.8 + 9.1 * 1i,
                               3.3 + 0.4 * 1i, 5.9 + 4.8 * 1i, 5.0 + 2.6 * 1i, 4.3 + 4.1 * 1i};

    // The results array, initialized to 0 + 0i for each element
    complex<double> results[N] = {};

    // An output file for FFT calculations
    ofstream outfile;
    outfile.open("output.txt");

    // Device pointers; thrust::complex<double> is layout-compatible with the
    // std::complex<double> host data
    thrust::complex<double> *datad;
    thrust::complex<double> *resultsd;

    // Size of the double-precision complex data type
    const int COMPLEX_ARRAY_SIZE = N * sizeof(hipDoubleComplex);

    hipMalloc( (void**)&datad, COMPLEX_ARRAY_SIZE );
    hipMalloc( (void**)&resultsd, COMPLEX_ARRAY_SIZE );
    hipMemcpy( datad, data, COMPLEX_ARRAY_SIZE, hipMemcpyHostToDevice );
    hipMemcpy( resultsd, results, COMPLEX_ARRAY_SIZE, hipMemcpyHostToDevice );

    dim3 dimGrid( 1, 1 );
    dim3 dimBlock( N, 1 );

    // Invoke the kernel
    hipLaunchKernelGGL(( fft), dim3(dimGrid), dim3(dimBlock), 0, 0, datad, resultsd);

    hipMemcpy(results, resultsd, COMPLEX_ARRAY_SIZE, hipMemcpyDeviceToHost);

    hipFree( datad );
    hipFree( resultsd );

    // Output the results to output.txt
    outfile << "TOTAL PROCESSED SAMPLES: " << N << "\n";
    outfile << "================================\n";

    // Print X, the results array
    for (int i = 0; i < N; i++)
    {
        outfile << results[i] << '\n';
        outfile << "================================\n";
    }

    // Close output file
    outfile.close();

    return 0;
}

__global__ void fft(thrust::complex<double> *datad, thrust::complex<double> *resultsd)
{
    int i = threadIdx.x;
    resultsd[i] = datad[i];
}
dd8dd4ed9832b4e813081015b2d1ad2867a329e0.cu
#include <iostream>
#include <fstream>
#include <cuda.h>
#include <complex>
#include <thrust/complex.h>
#include <cuComplex.h>

using namespace std;

__global__ void fft(thrust::complex<double> *, thrust::complex<double> *);

int main()
{
    const int N = 10;

    // An array of complex numbers per the specification
    complex<double> data[N] = {3.6 + 2.6 * 1i, 2.9 + 6.3 * 1i, 5.6 + 4.0 * 1i, 4.8 + 9.1 * 1i,
                               3.3 + 0.4 * 1i, 5.9 + 4.8 * 1i, 5.0 + 2.6 * 1i, 4.3 + 4.1 * 1i};

    // The results array, initialized to 0 + 0i for each element
    complex<double> results[N] = {};

    // An output file for FFT calculations
    ofstream outfile;
    outfile.open("output.txt");

    // Device pointers; thrust::complex<double> is layout-compatible with the
    // std::complex<double> host data
    thrust::complex<double> *datad;
    thrust::complex<double> *resultsd;

    // Size of the double-precision complex data type
    const int COMPLEX_ARRAY_SIZE = N * sizeof(cuDoubleComplex);

    cudaMalloc( (void**)&datad, COMPLEX_ARRAY_SIZE );
    cudaMalloc( (void**)&resultsd, COMPLEX_ARRAY_SIZE );
    cudaMemcpy( datad, data, COMPLEX_ARRAY_SIZE, cudaMemcpyHostToDevice );
    cudaMemcpy( resultsd, results, COMPLEX_ARRAY_SIZE, cudaMemcpyHostToDevice );

    dim3 dimGrid( 1, 1 );
    dim3 dimBlock( N, 1 );

    // Invoke the kernel
    fft<<<dimGrid, dimBlock>>>(datad, resultsd);

    cudaMemcpy(results, resultsd, COMPLEX_ARRAY_SIZE, cudaMemcpyDeviceToHost);

    cudaFree( datad );
    cudaFree( resultsd );

    // Output the results to output.txt
    outfile << "TOTAL PROCESSED SAMPLES: " << N << "\n";
    outfile << "================================\n";

    // Print X, the results array
    for (int i = 0; i < N; i++)
    {
        outfile << results[i] << '\n';
        outfile << "================================\n";
    }

    // Close output file
    outfile.close();

    return 0;
}

__global__ void fft(thrust::complex<double> *datad, thrust::complex<double> *resultsd)
{
    int i = threadIdx.x;
    resultsd[i] = datad[i];
}
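// ---------------------------------------------------------------------------
// Editor's note (not part of the original pair): the fft kernel above only
// copies its input to the output; it does not compute a transform.  As a
// hedged illustration of what a real forward DFT of similar data could look
// like, the sketch below uses the cuFFT library (link with -lcufft).  The
// plan and buffer names are assumptions made for this example only, and only
// the first two samples are filled in for brevity.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuComplex.h>
#include <cufft.h>
#include <cuda_runtime.h>

int main(){
  const int N = 10;
  cuDoubleComplex h_in[N] = {}, h_out[N] = {};
  h_in[0] = make_cuDoubleComplex(3.6, 2.6);   // fill the remaining samples as needed
  h_in[1] = make_cuDoubleComplex(2.9, 6.3);

  cuDoubleComplex *d_buf;
  cudaMalloc(&d_buf, N * sizeof(cuDoubleComplex));
  cudaMemcpy(d_buf, h_in, N * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);

  cufftHandle plan;
  cufftPlan1d(&plan, N, CUFFT_Z2Z, 1);                 // one batch of N double-complex points
  cufftExecZ2Z(plan, d_buf, d_buf, CUFFT_FORWARD);     // in-place forward transform

  cudaMemcpy(h_out, d_buf, N * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
  for (int i = 0; i < N; i++)
    printf("%f %+fi\n", cuCreal(h_out[i]), cuCimag(h_out[i]));

  cufftDestroy(plan);
  cudaFree(d_buf);
  return 0;
}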
b8e5e992190061a1f55020071ad9344feca293f4.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <time.h> #include <cmath> #include <chrono> #include <iomanip> #include <fstream> #include <cudnn.h> #include "math_constants.h" //Eigen includes #include <Eigen/Dense> #include <Eigen/Sparse> //Boost #include "boost/program_options.hpp" #include <boost/filesystem/operations.hpp> #include <boost/filesystem/path.hpp> //My own includes #include "add_model_info.h" #include "logger.h" #include "global_params.h" #include "prev_states.h" #include "input_file_prep.h" #include "BZ_CUDA_UTIL.h" #include "conv_char.h" #include "encoder_multi_source.h" #include "bi_encoder.h" #include "attention_layer.h" #include "attention_node.h" #include "attention_combiner.h" #include "decoder_model_wrapper.h" #include "ensemble_factory.h" #include "base_layer.h" #include "NCE.h" #include "gpu_info_struct.h" #include "custom_kernels.h" #include "Hidden_To_Hidden_Layer.h" #include "LSTM_HH.h" #include "model.h" #include "fileHelper.h" #include "fileHelper_source.h" #include "Eigen_Util.h" #include "model.hpp" //#include "base_layer.hpp" #include "LSTM.hpp" #include "softmax.hpp" #include "Input_To_Hidden_Layer.hpp" #include "Hidden_To_Hidden_Layer.hpp" #include "LSTM_HH.hpp" #include "decoder_model_wrapper.hpp" #include "ensemble_factory.hpp" #include "attention_layer.hpp" #include "attention_node.hpp" #include "NCE.hpp" #include "bi_encoder.hpp" #include "encoder_multi_source.hpp" #include "tree_LSTM.hpp" #include "input_file_prep.hpp" #include "attention_combiner.hpp" #include "attention_combiner_node.hpp" #include "conv_char.hpp" #include "highway_network.hpp" //parse the command line from the user void command_line_parse(global_params &params,int argc, char **argv) { //files for keeping the user input //if not s, 1st source, 2nd target, 3rd output weights name //if s, 1st target, 2nd output weights name std::vector<std::string> train_files; //files for force decoding //if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name //if s, 1. target input file 2. neural network file name 3. output file name std::vector<std::string> test_files; //stuff for adaptive learning rate schedule //if not seq , 1st is source dev, 2nd is target dev //if seq 1st is target dev std::vector<std::string> adaptive_learning_rate; //lower and upper range for parameter initialization std::vector<precision> lower_upper_range; //for the kbest flag, 4 arguements must be entered for kbest, 1. number of best paths 2 input file name //3. neural network file name (this is the output file you get after training the neural network)4. output file name std::vector<std::string> kbest_files; //for stoic gen, 1st neural network file, 2nd is output file name std::vector<std::string> stoicgen_files; //truncated softmax std::vector<std::string> trunc_info; //for decoding ratios std::vector<precision> decoding_ratio; //for continuing to train std::vector<std::string> cont_train; //for multi gpu training std::vector<int> gpu_indicies; std::vector<precision> clip_cell_vals; std::vector<double> NCE_vals; //for multisource std::vector<std::string> multi_source; //for char-mt std::vector<int> char_mt_vec; //basic format setup namespace po = boost::program_options; po::options_description desc("Options"); desc.add_options() ("help,h", "Run to get help on how to use the program. 
This is version 1.0") ("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\ ". \nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\ " \nFORMAT (if sequence): <target file name> <neural network output name>") ("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE PASSED IN)\n"\ "FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\ "FORMAT: (if seq): <target file name> <neural network file name>") ("vocab-mapping-file",po::value<std::string> (&params.ensemble_train_file_name),"Train a model with the same integerization mappings as another model. This is needed to do ensemble decoding\n"\ "FORMAT: <neural network file name>") ("train-source-RNN",po::value<bool>(&deniz::train_source_RNN),"train source RNN. DEFAULT: True") ("train-target-RNN",po::value<bool>(&deniz::train_target_RNN),"train target RNN. DEFAULT: True") ("train-source-input-embedding",po::value<bool>(&deniz::train_source_input_embedding),"train source input embeddings. DEFAULT: True") ("train-target-input-embedding",po::value<bool>(&deniz::train_target_input_embedding),"train target input embeddings. DEFAULT: True") ("train-target-output-embedding",po::value<bool>(&deniz::train_target_output_embedding),"train target output embeddings. DEFAULT: True") ("train-attention-target-RNN",po::value<bool>(&deniz::train_attention_target_RNN),"train target attention. DEFAULT: True") ("vocab-mapping-file-multi-source",po::value<std::string> (&params.multi_src_params.ensemble_train_file_name),"specify multi-source mapping for vocab") ("multi-source",po::value<std::vector<std::string>> (&multi_source)->multitoken(),"Specify the second source training file and mapping file for the multi-source model") //("multi-attention",po::value<bool>(&params.multi_src_params.multi_attention),"for attention model with multiple sources\n") ("multi-attention",po::value<bool>(&params.multi_src_params.multi_attention_v2),"Make the multi-source seq-to-seq model use attention\n") //("char-mt",po::value<std::vector<int>> (&char_mt_vec)->multitoken(),"<filter_size> <char_emb_size> <num highway layers> \n") //("add-ht",po::value<bool>(&params.multi_src_params.add_ht),"add hiddenstates for both attention models instead of sending through neural network\n") //("print-norms",po::value<bool>(&BZ_CUDA::print_norms),"Print out norms of all matrices\n") ("lstm-combine",po::value<bool>(&params.multi_src_params.lstm_combine),"For multi source seq-to-seq model, use the child-sum combination method if set to true, else use the basic method. DEFAULT: false\n") ("num-layers,N",po::value<int>(&params.num_layers),"Set the number of LSTM layers you want for your model\n DEFAULT: 1") ("multi-gpu,M",po::value<std::vector<int>> (&gpu_indicies)->multitoken(), "Train the model on multiple gpus.\nFORMAT: <gpu for layer 1> <gpu for layer 2> ... 
<gpu for softmax>\n"\ "DEFAULT: all layers and softmax lie on gpu 0") ("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\ "FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\ "FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>") // ("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\ // "FORMAT: <neural network file name> <output file name>") // ("stoch-gen-len",po::value<int>(&params.sg_length) ,"How many sentences to let stoch-gen run for\n"\ // "FORMAT: <num sentences>\n" // "DEFAULT: 100") //("dump-alignments",po::value<bool>(&params.attent_params.dump_alignments),"Dump the alignments to a file") // ("temperature",po::value<double>(&params.temperature) ,"What should the temperature be for the stoch generation"\ // "FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\ // "DEFAULT: 1") ("sequence,s", "Train model that learns a sequence,such as language modeling. Default model is sequence to sequence model") ("tmp-dir-location",po::value<std::string>(&params.tmp_location),"For all modes in the code, a tmp directiory must be created for data preparation. Specify the location of where you want this to be created. DEFAULT: Current directory") //("bi-directional",po::value<bool>(&params.bi_dir_params.bi_dir),"Have the source sequence be encoded bi-diretionally\n") //("combine-bi-directional",po::value<bool>(&params.bi_dir_params.bi_dir_comb),"send a nonlinear tranformation of the rev and nonrev hidden states from the source encoders to the decoder\n") //("share-embedding",po::value<bool>(&params.bi_dir_params.share_embeddings),"For the bidirectional encoder, share the embeddings") ("dropout,d",po::value<precision>(&params.dropout_rate),"Use dropout and set the dropout rate. This value is the probability of keeping a node. FORMAT: <dropout rate>. DEFAULT: 1.0") ("learning-rate,l",po::value<precision>(&params.learning_rate),"Set the learning rate. DEFAULT: 0.5") ("random-seed",po::value<int>(&params.random_seed_int),"Specify a random seed, instead of the model being seeded with the current time\n") ("longest-sent,L",po::value<int>(&params.longest_sent),"Set the maximum sentence length for training/force-decode/decode. DEFAULT: 100") ("hiddenstate-size,H",po::value<int>(&params.LSTM_size),"Set hiddenstate size. DEFAULT: 100") //("UNK-replacement",po::value<int>(&params.unk_aligned_width),"Set unk replacement to be true and set the wideth\n FORMAT: <alignment width>") // ("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\ // "FORMAT: <shortlist size> <sampled size>") ("UNK-decode",po::value<std::string>(&BZ_CUDA::unk_rep_file_name),"Use unk replacement at decoding time if you have an attention model. Specify a file that the system will output information to. \ This file will then need to be passed to the python script") ("NCE",po::value<int>(&params.num_negative_samples),"Use an NCE loss function, specify the number of noise samples you want (these are shared across the minibatch for speed). 
DEFAULT: uses MLE not NCE") ("NCE-share-samples",po::value<bool>(&params.share_samples),"Share the noise samples across the minibatch when using NCE for a speed increase. DEFAULT: True ") //("NCE-leg-dump",po::value<bool>(&BZ_CUDA::nce_legacy_dump),"Dont use this option") ("NCE-score",po::value<bool>(&BZ_CUDA::nce_score),"Bool for using unnormalized softmax outputs for force decoding. This will make the probabilities not sum to 1, but makes decoding significanly faster. You must have trained the model with NCE for this to work. DEFAULT: false") //("ASHISH-NCE-STATS",po::value<bool>(&BZ_CUDA::dump_NCE_stats),"for ashish") ("attention-model",po::value<bool>(&params.attent_params.attention_model),"Bool for whether you want to train with the attention mode. DEFAULT: False\n") ("attention-width",po::value<int>(&params.attent_params.D),"How many words do you want to look at around the alignment position on one half. DEFAULT: 10\n") ("feed-input",po::value<bool>(&params.attent_params.feed_input),"Bool for wether you want feed input for the attention model. DEFAULT: False\n") ("source-vocab-size,v",po::value<int>(&params.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus") ("target-vocab-size,V",po::value<int>(&params.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus") ("shuffle",po::value<bool>(&params.shuffle),"true if you want to shuffle the train data. DEFAULT: True") ("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\ "FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08") ("number-epochs,n",po::value<int>(&params.num_epochs),"Set number of epochs. DEFAULT: 10") ("matrix-clip-gradients,c",po::value<precision>(&params.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5") //("ind-clip-gradients,i",po::value<precision>(&BZ_CUDA::ind_norm_clip_thres),"CURRENT THIS DOES NOT WORK!!!!!!!!!!!!!!!!!!! \nSet gradient clipping threshold for individual elements\n DEFAULT: 0.1") ("whole-clip-gradients,w",po::value<precision>(&params.norm_clip),"Set gradient clipping threshold for all gradients\n DEFAULT: 5") ("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\ " when the perplexity on your specified dev set increases from the previous half epoch by some constant, so "\ " new_learning_rate = constant*old_learning rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n" "FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\ "FORMAT: (if sequence): <target dev file name>") ("clip-cell",po::value<std::vector<precision>>(&clip_cell_vals)->multitoken(),"Specify the cell clip threshold and the error threshold in backprop.\n FORMAT: <Cell clip threshold> <Error clip Threshold> . Recommended values: <50> <1000>. DEFAULT: not used\n") ("adaptive-decrease-factor,A",po::value<precision>(&params.decrease_factor),"To be used with adaptive-halve-lr"\ " it\n DEFAULT: 0.5") ("fixed-halve-lr",po::value<int> (&params.epoch_to_start_halving),"Halve the learning rate"\ " after a certain epoch, every half epoch afterwards by a specific amount. FORMAT: <epoch number>") ("fixed-halve-lr-full",po::value<int> (&params.epoch_to_start_halving_full),"Halve the learning rate"\ " after a certain epoch, every epoch afterwards by a specific amount. 
FORMAT: <epoch number>") ("minibatch-size,m",po::value<int>(&params.minibatch_size),"Set minibatch size. DEFAULT: 8.") ("screen-print-rate",po::value<int>(&params.screen_print_rate),"Set after how many minibatches you want to print info to the stdout and/or the logfile\n DEFAULT: 5") ("logfile",po::value<std::string>(&params.HPC_output_file_name),"Dump the terminal output to a" \ "file \n FORMAT: <file name>") ("best-model,B",po::value<std::string>(&params.best_model_file_name),"During train have the best model (determined by validation perplexity) be written to a file\nFORMAT: <output file name>") ("save-all-models",po::value<bool>(&BZ_CUDA::dump_every_best),"Save the every model every half epoch") ("decode,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get top decoding outputs using beam search in sequence to sequence model. You can specify more than one model for ensemble decoding\n"\ "FORMAT: <how many outputs> <neural network file 1> <neural network file 2> ... <output file>") ("decode-main-data-files",po::value<std::vector<std::string> > (&params.decode_user_files)->multitoken(),"FORMAT: <data file 1> <data file 2> ... ") ("decode-multi-source-data-files",po::value<std::vector<std::string> > (&params.decode_user_files_additional)->multitoken(),"FORMAT: <multi-source data file 1> <multi-source data file 2> ... ") ("decode-multi-source-vocab-mappings",po::value<std::vector<std::string> > (&params.model_names_multi_src)->multitoken(),"FORMAT: <multi-source vocab mapping 1> <multi-source vocab mapping 2> ... ") ("pre-norm-ensemble",po::value<bool>(&BZ_CUDA::pre_norm),"For --decode, ensemble the models before they are normalized to probabilities") ("beam-size,b",po::value<int>(&params.beam_size),"Set beam size for --decode paths\n DEFAULT: 12") ("penalty,p",po::value<precision>(&params.penalty),"Set penalty for --decode decoding. The value entered"\ " will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0") ("print-score",po::value<bool>(&params.print_score),"Set if you want to print out the unnormalized log prob for each path when using --decode"\ "FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false") ("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length rations when using --decode\n"\ "This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\ " and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ration> <max ratio>\n"\ "DEFAULT: 0.5, 1.5"); // ("tsne-dump",po::value<bool>(&BZ_STATS::tsne_dump),"for dumping multi-source hiddenstates during decoding") // ("Dump-LSTM",po::value<std::string>(&params.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\ // "The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\ // " 4.c_t 5.output gate 6.h_t 7.probabilities"); po::variables_map vm; //kbest should be changed to decode. train-emsemble should be changed to vocab-mapping-file. screen-print-rate should be changed //Declare license for the code. LGPL license or MIT license?. 
try { po::store(po::parse_command_line(argc, argv, desc), vm); po::notify(vm); std::cout << "------------- Printing options that have currently being set by the user -------------\n"; //now try to loop over all the boost program options for (auto it=vm.begin(); it != vm.end(); it++) { std::cout << "Variable: " << it->first << " Value: "; auto& value = it->second.value(); if (auto v = boost::any_cast<int>(&value)) { std::cout << *v << "\n"; } else if (auto v = boost::any_cast<bool>(&value)) { std::cout << *v << "\n"; } else if (auto v = boost::any_cast<float>(&value)) { std::cout << *v << "\n"; } else if(auto v = boost::any_cast<double>(&value)) { std::cout << *v << "\n"; } else if(auto v = boost::any_cast<std::string>(&value)) { std::cout << *v << "\n"; } else if(std::vector<std::string> *v = boost::any_cast<std::vector<std::string>>(&value)) { std::vector<std::string> vv = *v; for(int i=0; i<vv.size(); i++) { std::cout << " " << vv[i] << " "; } std::cout << "\n"; } else { std::cout << "Not Printable\n"; } } std::cout << "--------------------------------------------------------------------------------------\n\n"; //see if the user specified the help flag if ( vm.count("help") ) { std::cout << "\n------------------------------\n"; std::cout << "This is Barret Zoph's GPU RNN library\n" << "The flags for the command line interface are below\n" << "Look at the README for an indepth tutorial and example commands\n" << "" << "\n"; std::cout << desc << "\n"; exit (EXIT_FAILURE); } if (vm.count("random-seed") ) { params.random_seed = true; } if (vm.count("tmp-dir-location")) { if (params.tmp_location != "") { if (params.tmp_location[params.tmp_location.size()-1]!='/') { params.tmp_location+="/"; } } } if(vm.count("shuffle")) { BZ_CUDA::shuffle_data = params.shuffle; } if(vm.count("logfile")) { params.HPC_output = true; //BZ_CUDA::HPC_output = true; } BZ_CUDA::logger.SetOutputLogger(params.HPC_output_file_name,params.HPC_output); //error checks to be sure only once of these options is set if (vm.count("train") && vm.count("decode")) { BZ_CUDA::logger << "ERROR: you cannot train and get decode at the same time\n"; exit (EXIT_FAILURE); } if (vm.count("train") && vm.count("force-decode")) { BZ_CUDA::logger << "ERROR: you cannot train and force-decode at the same time\n"; exit (EXIT_FAILURE); } if (vm.count("force-decode") && vm.count("decode")) { BZ_CUDA::logger << "ERROR: you cannot force-decode and get decode at the same time\n"; exit (EXIT_FAILURE); } if (!(vm.count("train") || vm.count("force-decode") || vm.count("decode")||vm.count("stoch-gen") || vm.count("cont-train") )) { BZ_CUDA::logger << "ERROR: you must either train,continue training,get decode,stoch generate data or force-decode\n"; exit (EXIT_FAILURE); } if(vm.count("parameter-range")) { BZ_CUDA::lower = lower_upper_range[0]; BZ_CUDA::upper = lower_upper_range[1]; } if(vm.count("cont-train")) { BZ_CUDA::cont_train = true; } else { BZ_CUDA::cont_train = false; } //this is for making sure dev_synch_all only loops over current GPU's specified // if(vm.count("multi-gpu")) { // if(gpu_indicies.size()==0) { // gpu_info::device_numbers.push_back(0); // } // else { // gpu_info::device_numbers = gpu_indicies; // } // } if(vm.count("clip-cell")) { if(clip_cell_vals.size()!=2) { BZ_CUDA::logger << "ERROR: clip-cell must have exactly two arguement\n"; exit (EXIT_FAILURE); } BZ_CUDA::clip_cell = true; BZ_CUDA::cell_clip_threshold = clip_cell_vals[0]; BZ_CUDA::error_clip_threshold = clip_cell_vals[1]; } params.longest_sent+=4; //because it is really 4 
less if(vm.count("UNK-decode")) { BZ_CUDA::unk_replacement = true; BZ_CUDA::unk_rep_file_stream.open(BZ_CUDA::unk_rep_file_name.c_str()); for(int i=0; i<params.beam_size; i++) { BZ_CUDA::viterbi_alignments.push_back(-1); } for(int i=0; i<params.beam_size * params.longest_sent; i++) { BZ_CUDA::alignment_scores.push_back(0); } BZ_CUDA::h_align_indicies = (int*)malloc((2*params.attent_params.D+1)*params.beam_size*sizeof(int)); BZ_CUDA::h_alignment_values = (precision*)malloc((2*params.attent_params.D+1)*params.beam_size*sizeof(precision)); } if(vm.count("char-mt")) { params.char_params.char_cnn = true; params.char_params.filter_size = char_mt_vec[0]; params.char_params.char_emb_size = char_mt_vec[1]; params.char_params.num_highway_layers = char_mt_vec[2]; extract_char_info(params.char_params.longest_word,params.char_params.num_unique_chars_source, params.char_params.num_unique_chars_target,params.source_vocab_size,params.target_vocab_size, params.char_params.char_mapping_file,params.char_params.word_mapping_file); } if(vm.count("train") || vm.count("cont-train")) { if(vm.count("multi-source")) { if(multi_source.size()!=2) { BZ_CUDA::logger << "ERROR only two arguements for the multi-source flag\n"; exit (EXIT_FAILURE); } params.multi_src_params.multi_source = true; params.multi_src_params.file_name = multi_source[0]; params.multi_src_params.source_model_name = multi_source[1]; } //some basic error checks to parameters if(params.learning_rate<=0) { BZ_CUDA::logger << "ERROR: you cannot have a learning rate <=0\n"; exit (EXIT_FAILURE); } if(params.minibatch_size<=0) { BZ_CUDA::logger << "ERROR: you cannot have a minibatch of size <=0\n"; exit (EXIT_FAILURE); } if(params.LSTM_size<=0) { BZ_CUDA::logger << "ERROR: you cannot have a hiddenstate of size <=0\n"; exit (EXIT_FAILURE); } if(params.source_vocab_size<=0) { if(params.source_vocab_size!=-1) { BZ_CUDA::logger << "ERROR: you cannot have a source_vocab_size <=0\n"; exit (EXIT_FAILURE); } } if(params.target_vocab_size<=0) { if(params.target_vocab_size!=-1) { BZ_CUDA::logger << "ERROR: you cannot have a target_vocab_size <=0\n"; exit (EXIT_FAILURE); } } if(params.norm_clip<=0) { BZ_CUDA::logger << "ERROR: you cannot have your norm clip <=0\n"; exit (EXIT_FAILURE); } if(params.num_epochs<=0) { BZ_CUDA::logger << "ERROR: you cannot have num_epochs <=0\n"; exit (EXIT_FAILURE); } // if(vm.count("logfile")) { // params.HPC_output = true; // BZ_CUDA::HPC_output = true; // } if(vm.count("dropout")) { params.dropout = true; if(params.dropout_rate < 0 || params.dropout_rate > 1) { BZ_CUDA::logger << "ERROR: dropout rate must be between 0 and 1\n"; exit (EXIT_FAILURE); } } if(vm.count("matrix-clip-gradients")) { BZ_CUDA::global_clip_flag = false; params.clip_gradient = true; BZ_CUDA::individual_grad_clip = false; } if(vm.count("whole-clip-gradients")) { BZ_CUDA::global_clip_flag = true; params.clip_gradient = false; BZ_CUDA::individual_grad_clip = false; } if(vm.count("ind-clip-gradients")) { BZ_CUDA::global_clip_flag = false; params.clip_gradient = false; BZ_CUDA::individual_grad_clip = true; } if(vm.count("NCE")) { params.NCE = true; params.softmax = false; //BZ_CUDA::print_partition_function = true; } if(vm.count("UNK-replacement")) { params.unk_replace = true; } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n\n"; 
boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); //BZ_CUDA::logger << "Unique_dir: " << params.unique_dir << "\n"; params.train_file_name = params.unique_dir+"/train.txt"; //number of layers //error checking is done when initializing model if(vm.count("multi-gpu")) { params.gpu_indicies = gpu_indicies; } if(vm.count("cont-train")) { //sequence model if(vm.count("sequence")) { if(cont_train.size()!=2) { BZ_CUDA::logger << (int)cont_train.size() << "\n"; BZ_CUDA::logger << "ERROR: two arguements to be supplied to the continue train flag\n"\ " 1. train data file name, 2. neural network file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.attent_params.attention_model = false; params.target_file_name = cont_train[0]; params.input_weight_file = cont_train[1]; params.output_weight_file = cont_train[1]; params.LM = true; params.load_model_train = true; params.load_model_name = params.input_weight_file; input_file_prep input_helper; input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name, params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers); } else { if(cont_train.size()!=3) { BZ_CUDA::logger << "ERROR: three arguements to be supplied to the continue train flag\n"\ " 1. source train data file name 2. target train data file name 3. neural network file name \n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.LM = false; params.source_file_name = cont_train[0]; params.target_file_name = cont_train[1]; params.input_weight_file = cont_train[2]; params.output_weight_file = cont_train[2]; params.load_model_train = true; params.load_model_name = params.input_weight_file; BZ_CUDA::logger << "Load model name: " << params.load_model_name << "\n"; if(params.source_file_name == params.target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } input_file_prep input_helper; if(vm.count("multi-source")) { params.multi_src_params.int_file_name = params.unique_dir + params.multi_src_params.int_file_name; } if(params.char_params.char_cnn) { params.train_file_name = params.char_params.word_train_file; params.test_file_name = params.char_params.word_dev_file; params.output_weight_file = params.char_params.word_mapping_file; } else { input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name, params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size, params.source_vocab_size,params.target_vocab_size,params.num_layers,params.attent_params.attention_model, params.multi_src_params.multi_source,params.multi_src_params.file_name,params.multi_src_params.int_file_name, params.multi_src_params.source_model_name); } } } else { if(vm.count("num-layers")) { if(params.num_layers <=0) { BZ_CUDA::logger << "ERROR: you must have >= 1 layer for your model\n"; exit (EXIT_FAILURE); } } //now create the necessary files if(vm.count("sequence")) { if(train_files.size()!=2) { BZ_CUDA::logger << "ERROR: two arguements to be supplied to the train flag"\ " 1. train data file name, 2. 
neural network output name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.attent_params.attention_model = false; params.LM = true; params.target_file_name = train_files[0]; params.output_weight_file = train_files[1]; input_file_prep input_helper; if(vm.count("vocab-mapping-file")) { params.ensemble_train = true; } //this outputs the train.txt file along with the mappings and first line bool success=true; if(!params.ensemble_train) { success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent, params.target_file_name, params.train_file_name,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers); } else { success = input_helper.prep_files_train_LM_ensemble(params.minibatch_size,params.longest_sent, params.target_file_name, params.train_file_name,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name); } //clean up if error if(!success) { boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } else { //then sequence to sequence model if(train_files.size()!=3) { BZ_CUDA::logger << (int)train_files.size() <<"\n"; BZ_CUDA::logger << "ERROR: three arguements to be supplied to the train flag for the sequence to sequence model\n"\ " 1. source train data file name\n 2. target train data file name \n3. neural network output name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.LM = false; params.source_file_name = train_files[0]; params.target_file_name = train_files[1]; params.output_weight_file = train_files[2]; if(params.source_file_name == params.target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } //see if ensemble training if(vm.count("vocab-mapping-file")) { params.ensemble_train = true; } input_file_prep input_helper; bool success=true; //check if char if(params.char_params.char_cnn) { params.train_file_name = params.char_params.word_train_file; params.test_file_name = params.char_params.word_dev_file; params.output_weight_file = params.char_params.word_mapping_file; } else { if(params.multi_src_params.multi_source) { params.multi_src_params.int_file_name = params.unique_dir + params.multi_src_params.int_file_name; if(params.ensemble_train) { input_helper.prep_files_train_nonLM_multi_source_ensemble(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size, params.num_layers,params.multi_src_params.file_name,params.multi_src_params.int_file_name, params.multi_src_params.source_model_name,params.ensemble_train_file_name,params.multi_src_params.ensemble_train_file_name); } else { input_helper.prep_files_train_nonLM_multi_source(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size, params.num_layers,params.multi_src_params.file_name,params.multi_src_params.int_file_name, params.multi_src_params.source_model_name); } } else 
if(!params.ensemble_train) { success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.unk_replace,params.unk_aligned_width,params.attent_params.attention_model); } else { success = input_helper.prep_files_train_nonLM_ensemble(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name,params.attent_params.attention_model); } } //clean up if error if(!success) { boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } } if(vm.count("parameter-range")) { if(lower_upper_range.size()!=2) { BZ_CUDA::logger << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } BZ_CUDA::lower = lower_upper_range[0]; BZ_CUDA::upper = lower_upper_range[1]; if(BZ_CUDA::lower >= BZ_CUDA::upper) { BZ_CUDA::logger << "ERROR: the lower parameter range cannot be greater than the upper range\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } if(vm.count("fixed-halve-lr-full")) { params.stanford_learning_rate = true; } if(vm.count("fixed-halve-lr")) { params.google_learning_rate = true; if(params.epoch_to_start_halving<=0) { BZ_CUDA::logger << "ERROR: cannot halve learning rate until 1st epoch \n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } if(vm.count("adaptive-halve-lr")) { params.learning_rate_schedule = true; if(vm.count("sequence")) { if(adaptive_learning_rate.size()!=1) { BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes one arguement\n1.dev file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.dev_target_file_name = adaptive_learning_rate[0]; params.test_file_name = params.unique_dir + "/validation.txt"; input_file_prep input_helper; if(!params.char_params.char_cnn) { input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name, params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers); } } else { if(adaptive_learning_rate.size()!=2 && !params.multi_src_params.multi_source) { BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes two arguements\n1.source dev file name\n2.target dev file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(adaptive_learning_rate.size()!=3 && params.multi_src_params.multi_source) { BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes three arguements with multi-source\n1.source dev file name\n2.target dev file name\n3.other source dev file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(params.multi_src_params.multi_source) { params.multi_src_params.test_file_name = adaptive_learning_rate[2]; } params.dev_source_file_name = 
adaptive_learning_rate[0]; params.dev_target_file_name = adaptive_learning_rate[1]; params.test_file_name = params.unique_dir + "/validation.txt"; params.multi_src_params.int_file_name_test = params.unique_dir + params.multi_src_params.int_file_name_test; if(params.char_params.char_cnn) { params.train_file_name = params.char_params.word_train_file; params.test_file_name = params.char_params.word_dev_file; } if(params.dev_source_file_name == params.dev_target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } input_file_prep input_helper; if(!params.char_params.char_cnn) { input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name, params.dev_target_file_name,params.test_file_name, params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size,params.num_layers, params.attent_params.attention_model,params.multi_src_params.multi_source,params.multi_src_params.test_file_name,params.multi_src_params.int_file_name_test,params.multi_src_params.source_model_name); } } if(vm.count("best-model")) { params.best_model = true; } } if(vm.count("truncated-softmax")) { params.shortlist_size = std::stoi(trunc_info[0]); params.sampled_size = std::stoi(trunc_info[1]); params.truncated_softmax = true; if(params.shortlist_size + params.sampled_size > params.target_vocab_size) { BZ_CUDA::logger << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } //put in the first line of the model file with the correct info //format: //0: num_layers //1: LSTM_size //2: target_vocab_size //3: source_vocab_size //4: attention_model //5: feed_input //6: multi_source //7: combine_LSTM //8: char_cnn add_model_info(params.num_layers,params.LSTM_size,params.target_vocab_size,params.source_vocab_size,params.attent_params.attention_model,params.attent_params.feed_input,\ params.multi_src_params.multi_source,params.multi_src_params.lstm_combine,params.char_params.char_cnn,params.output_weight_file); params.train= true; params.decode=false; params.test = false; params.stochastic_generation = false; return; } else { //checks here for things that should only be specified during training if(vm.count("train-source-RNN")) { std::cout << "Error train-source-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-RNN")) { std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-source-input-embedding")) { std::cout << "Error train-source-input-embedding should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-input-embedding")) { std::cout << "Error train-target-input-embedding should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-output-embedding")) { std::cout << "Error train-target-output-embedding should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-train-attention-target-RNN")) { std::cout << "Error train-train-attention-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit 
(EXIT_FAILURE); } if(vm.count("vocab-mapping-file-multi-source")) { std::cout << "Error vocab-mapping-file-multi-source should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("multi-source")) { std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-RNN")) { std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("multi-attention")) { std::cout << "Error multi-attention should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("lstm-combine")) { std::cout << "Error lstm-combine should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("num-layers")) { std::cout << "Error num-layers should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("dropout")) { std::cout << "Error dropout should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("learning-rate")) { std::cout << "Error learning-rate should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("random-seed")) { std::cout << "Error random-seed should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("hiddenstate-size")) { std::cout << "Error hiddenstate-size should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("NCE")) { std::cout << "Error NCE should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("NCE-share-samples")) { std::cout << "Error NCE-share-samples should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("attention-model")) { std::cout << "Error attention-model should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("attention-width")) { std::cout << "Error attention-width should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("feed-input")) { std::cout << "Error feed-input should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("source-vocab-size")) { std::cout << "Error source-vocab-size should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("target-vocab-size")) { std::cout << "Error target-vocab-size should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("parameter-range")) { std::cout << "Error parameter-range should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("number-epochs")) { std::cout << "Error number-epochs should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("matrix-clip-gradients")) { std::cout << "Error matrix-clip-gradients should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("whole-clip-gradients")) { std::cout << "Error whole-clip-gradients should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("adaptive-halve-lr")) { std::cout << "Error adaptive-halve-lr should only be used during 
training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("clip-cell")) { std::cout << "Error clip-cell should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("adaptive-decrease-factor")) { std::cout << "Error adaptive-decrease-factor should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("fixed-halve-lr")) { std::cout << "Error fixed-halve-lr should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("fixed-halve-lr-full")) { std::cout << "Error fixed-halve-lr-full should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("screen-print-rate")) { std::cout << "Error screen-print-rate should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("best-model")) { std::cout << "Error best-model should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } } if(vm.count("decode")) { if (kbest_files.size()<3) { BZ_CUDA::logger << "ERROR: at least 4 arguements must be entered for --decode, 1. number of best outputs\n"\ " 2. neural network file name (this is the output file you get after training the neural network)\n"\ " 3. output file name\n"\ "Additionally more neural network file names can be added to do ensemble decoding\n"; exit (EXIT_FAILURE); } //fill into NULL if the user did not specify anything if(params.decode_user_files_additional.size()==0) { for(int i=0; i<params.decode_user_files.size(); i++) { params.decode_user_files_additional.push_back("NULL"); } } //once again fill in NULL if user did not specify if(params.model_names_multi_src.size()==0) { for(int i=0; i<params.decode_user_files.size(); i++) { params.model_names_multi_src.push_back("NULL"); } } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n"; boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); // if(vm.count("tmp-dir-location")) { // params.unique_dir = params.tmp_location + params.unique_dir; // } //for ensembles for(int i=1; i<kbest_files.size()-1; i++) { params.model_names.push_back(kbest_files[i]); std::string temp_path = params.unique_dir+ "/kbest_tmp_" + std::to_string(i-1); params.decode_temp_files.push_back(temp_path); temp_path = params.unique_dir+ "/kbest_tmp_additional_" + std::to_string(i-1); params.decode_temp_files_additional.push_back(temp_path); } //BZ_CUDA::logger << "params.model_names: " << (int)params.model_names.size() << "\n"; //BZ_CUDA::logger << "decode_user_files: " << (int)params.decode_user_files.size() << "\n"; //BZ_CUDA::logger << "model_names_multi_src: " << (int)params.model_names_multi_src.size() << "\n"; if(params.model_names.size() != params.decode_user_files.size() || params.model_names.size() != params.model_names_multi_src.size()) { BZ_CUDA::logger << "ERROR: the same number of inputs must be specified as models\n"; exit (EXIT_FAILURE); } //params.decode_file_name = params.unique_dir+"/decoder_input.txt"; params.decoder_output_file = params.unique_dir+"/decoder_output.txt"; params.num_hypotheses =std::stoi(kbest_files[0]); //params.decode_tmp_file = kbest_files[1]; //params.input_weight_file = model_names[0]; 
params.decoder_final_file = kbest_files.back(); input_file_prep input_helper; // input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt", // params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size); for(int i=0; i<params.decode_temp_files.size(); i++) { input_helper.integerize_file_kbest(params.model_names[i],params.decode_user_files[i],params.decode_temp_files[i], params.longest_sent,params.target_vocab_size,false,"NULL"); if(params.decode_user_files_additional[i]!= "NULL") { input_helper.integerize_file_kbest(params.model_names[i],params.decode_user_files_additional[i],params.decode_temp_files_additional[i], params.longest_sent,params.target_vocab_size,true,params.model_names_multi_src[i]); } } if(vm.count("multi-gpu")) { if(gpu_indicies.size()!=params.model_names.size()) { BZ_CUDA::logger << "ERROR: for decoding, each model must be specified a gpu\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.gpu_indicies = gpu_indicies; } else { for(int i=0; i<params.model_names.size(); i++) { params.gpu_indicies.push_back(0); } } if(params.beam_size<=0) { BZ_CUDA::logger << "ERROR: beam size cannot be <=0\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(params.penalty<0) { BZ_CUDA::logger << "ERROR: penalty cannot be less than zero\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(vm.count("Dump-LSTM")) { params.dump_LSTM=true; } if(vm.count("dec-ratio")) { if(decoding_ratio.size()!=2) { BZ_CUDA::logger << "Decoding ratio size: " << (int)decoding_ratio.size() << "\n"; BZ_CUDA::logger << decoding_ratio[0] << "\n"; BZ_CUDA::logger << "ERROR: only two inputs for decoding ratio\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.min_decoding_ratio = decoding_ratio[0]; params.max_decoding_ratio = decoding_ratio[1]; if(params.min_decoding_ratio >= params.max_decoding_ratio) { BZ_CUDA::logger << "ERROR: min decoding ratio must be <= max_decoding_ratio\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } params.train = false; params.decode = true; params.test = false; params.stochastic_generation = false; params.LM = false; return; } if(vm.count("force-decode")) { BZ_CUDA::force_decode = true; if(vm.count("multi-gpu")) { params.gpu_indicies = gpu_indicies; } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n"; boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); // if(vm.count("tmp-dir-location")) { // params.unique_dir = params.tmp_location + params.unique_dir; // } params.test_file_name = params.unique_dir + "/validation.txt"; if(vm.count("sequence")) { if(test_files.size()!=3) { BZ_CUDA::logger << "ERROR: force-decode takes three arguements 1.input file name (input sentences)"\ "2. 
neural network file name 3.output file name \n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.attent_params.attention_model = false; params.target_file_name = test_files[0]; params.input_weight_file = test_files[1]; params.output_force_decode = test_files[2]; params.LM = true; input_file_prep input_helper; input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name, params.longest_sent,params.minibatch_size,false,params.LSTM_size,params.target_vocab_size,params.num_layers); } else { if(test_files.size()!=4) { BZ_CUDA::logger << "ERROR: force-decode takes four arguements: 1. source input file"\ " 2. target input file 3. neural network file name 4. output file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.LM = false; params.source_file_name = test_files[0]; params.target_file_name = test_files[1]; params.input_weight_file = test_files[2]; params.output_force_decode = test_files[3]; //stuff for attention model alignments params.attent_params.tmp_alignment_file = params.unique_dir + "/alignments.txt"; if(params.source_file_name == params.target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(vm.count("multi-source")) { if(multi_source.size()!=2) { BZ_CUDA::logger << "ERROR only two arguements for the multi-source flag\n"; exit (EXIT_FAILURE); } params.multi_src_params.multi_source = true; params.multi_src_params.test_file_name = multi_source[0]; params.multi_src_params.source_model_name = multi_source[1]; params.multi_src_params.int_file_name_test = params.unique_dir + params.multi_src_params.int_file_name_test; } if(!params.char_params.char_cnn) { input_file_prep input_helper; input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name, params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size, params.source_vocab_size,params.target_vocab_size,params.num_layers,params.attent_params.attention_model, params.multi_src_params.multi_source,params.multi_src_params.test_file_name,params.multi_src_params.int_file_name_test, params.multi_src_params.source_model_name); } else { params.test_file_name = params.char_params.word_dev_file; } params.minibatch_size=1; } std::ifstream tmp_if_stream(params.input_weight_file.c_str()); std::string tmp_str; std::string tmp_word; std::getline(tmp_if_stream,tmp_str); std::istringstream my_ss(tmp_str,std::istringstream::in); std::vector<std::string> tmp_model_params; while(my_ss >> tmp_word) { tmp_model_params.push_back(tmp_word); } if(tmp_model_params.size() != 9) { BZ_CUDA::logger << "Error: the model file is not in the correct format for force-decode\n"; exit (EXIT_FAILURE); } params.num_layers = std::stoi(tmp_model_params[0]); params.LSTM_size = std::stoi(tmp_model_params[1]); params.target_vocab_size = std::stoi(tmp_model_params[2]); params.source_vocab_size = std::stoi(tmp_model_params[3]); params.attent_params.attention_model = std::stoi(tmp_model_params[4]); params.attent_params.feed_input = std::stoi(tmp_model_params[5]); params.multi_src_params.multi_source = std::stoi(tmp_model_params[6]); params.multi_src_params.lstm_combine = std::stoi(tmp_model_params[7]); params.char_params.char_cnn = std::stoi(tmp_model_params[8]); 
params.train= false; params.decode=false; params.test = true; // params.minibatch_size=1; params.stochastic_generation = false; return; } if(vm.count("stoch-gen")) { if(!vm.count("sequence")) { BZ_CUDA::logger << "ERROR: you can only do stoch-gen on the sequence model\n"; exit (EXIT_FAILURE); } if(stoicgen_files.size()!=2) { BZ_CUDA::logger << "ERROR: stoch-gen takes two inputs"\ " 1. neural network file name 2. output file name\n"; exit (EXIT_FAILURE); } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n"; boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); // if(vm.count("tmp-dir-location")) { // params.unique_dir = params.tmp_location + params.unique_dir; // } params.sg_output_file_temp = params.unique_dir + "/sg.txt"; params.input_weight_file = stoicgen_files[0]; params.sg_output_file = stoicgen_files[1]; std::ifstream weights_file; std::vector<std::string> info; std::string str; std::string word; weights_file.open(params.input_weight_file.c_str()); weights_file.seekg(0, std::ios::beg); std::getline(weights_file, str); //info from first sentence std::istringstream iss(str, std::istringstream::in); while(iss >> word) { info.push_back(word); } weights_file.close(); params.LSTM_size = std::stoi(info[1]); params.target_vocab_size = std::stoi(info[2]); params.LM = true; params.train= false; params.decode = false; params.test = false; params.minibatch_size = 1; params.stochastic_generation = true; return; } } catch(po::error& e) { std::cerr << "ERROR: " << e.what() << std::endl << std::endl; //std::cerr << desc << std::endl; exit (EXIT_FAILURE); } } void myexitfunc(void) { } int main(int argc, char **argv) { //Timing stuff std::chrono::time_point<std::chrono::system_clock> start_total, end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding,begin_epoch; std::chrono::duration<double> elapsed_seconds; start_total = std::chrono::system_clock::now(); //Initializing the model global_params params; //Declare all of the global parameters //create tmp directory if it does not exist already // if( !(boost::filesystem::exists("tmp/"))) { // std::cout << "Creating tmp directory for program\n"; // boost::filesystem::create_directory("tmp/"); // } //atexit(); //this is used to clean up the end of the code //file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information BZ_CUDA::curr_seed = static_cast<unsigned int>(std::time(0)); BZ_CUDA::curr_seed = ::min((unsigned int)100000000,BZ_CUDA::curr_seed);//to prevent overflow //get the command line arguements command_line_parse(params,argc,argv); // if(params.HPC_output) { // std::cout << "Opening logfile: " << params.HPC_output_file_name << "\n"; // HPC_output.open(params.HPC_output_file_name); // } //randomize the seed if(params.random_seed) { BZ_CUDA::gen.seed(static_cast<unsigned int>(params.random_seed_int)); } else { BZ_CUDA::gen.seed(static_cast<unsigned int>(std::time(0))); } neuralMT_model<precision> model; //This is the model printIntroMessage(params); if(!params.decode) { model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size, params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip, 
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax, params.shortlist_size,params.sampled_size,params.LM,params.num_layers,params.gpu_indicies,params.dropout, params.dropout_rate,params.attent_params,params); } if(params.load_model_train) { std::string temp_swap_weights = model.input_weight_file; model.input_weight_file = params.load_model_name; model.load_weights(); model.input_weight_file = temp_swap_weights; } ////////////////////////////////////Train the model////////////////////////////////////// if(params.train) { //info for averaging the speed int curr_batch_num_SPEED = 0; const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever int total_words_batch_SPEED = 0; double total_batch_time_SPEED = 0; //File info for the training file file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax, params.shortlist_size,params.sampled_size,params.char_params,params.char_params.char_train_file); //Initialize the file information //model.initFileInfo(&file_info); params.half_way_count = params.train_total_words/2; if(params.google_learning_rate) { BZ_CUDA::logger << "Number of words at which to start halving the learning rate: " << params.half_way_count << "\n"; // if(params.HPC_output) { // HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n"; // HPC_output.flush(); // } } int current_epoch = 1; BZ_CUDA::logger << "Starting model training\n"; BZ_CUDA::logger << "-----------------------------------" << "\n"; BZ_CUDA::logger << "Starting epoch 1\n"; BZ_CUDA::logger << "-----------------------------------" << "\n"; // if(params.HPC_output) { // HPC_output << "Starting model training\n"; // HPC_output << "Starting epoch 1\n"; // HPC_output.flush(); // } //stuff for learning rate schedule int total_words = 0; precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs double old_perplexity = 0; model.train_perplexity = 0; //set the model perplexity to zero begin_epoch = std::chrono::system_clock::now(); while(current_epoch <= params.num_epochs) { begin_minibatch = std::chrono::system_clock::now(); bool success = file_info.read_minibatch(); if(model.multi_source) { model.src_fh.read_minibatch(); } end_minibatch = std::chrono::system_clock::now(); elapsed_seconds = end_minibatch-begin_minibatch; //std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n"; total_batch_time_SPEED+= elapsed_seconds.count(); begin_minibatch = std::chrono::system_clock::now(); //hipProfilerStart(); model.initFileInfo(&file_info); model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output, file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output, file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source, file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target, file_info.current_source_length,file_info.current_target_length, file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad, file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices, 
file_info.len_unique_words_trunc_softmax,file_info.h_batch_info,&file_info); //hipProfilerStop(); //return; // return 0; end_minibatch = std::chrono::system_clock::now(); elapsed_seconds = end_minibatch-begin_minibatch; total_batch_time_SPEED+= elapsed_seconds.count(); total_words_batch_SPEED+=file_info.words_in_minibatch; if(curr_batch_num_SPEED>=thres_batch_num_SPEED) { BZ_CUDA::logger << "Recent batch gradient L2 norm size (if using -w): " << BZ_CUDA::global_norm << "\n"; BZ_CUDA::logger << "Time to compute gradients for previous " << params.screen_print_rate << " minibatches: " << total_batch_time_SPEED/60.0 << " minutes\n"; BZ_CUDA::logger << "Number of words in previous " << params.screen_print_rate << " minibatches: " << total_words_batch_SPEED << "\n"; BZ_CUDA::logger << "Throughput for previous " << params.screen_print_rate << " minibatches: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n"; BZ_CUDA::logger << total_words << " words out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n"; // if(params.HPC_output) { // HPC_output << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n"; // HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n"; // HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n"; // HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n"; // HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n"; // HPC_output.flush(); // } total_words_batch_SPEED = 0; total_batch_time_SPEED = 0; curr_batch_num_SPEED = 0; } curr_batch_num_SPEED++; total_words += file_info.words_in_minibatch; //stuff for google learning rate if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count && learning_rate_flag) { temp_learning_rate = temp_learning_rate/2; BZ_CUDA::logger << "New Learning Rate: " << temp_learning_rate << "\n"; model.update_learning_rate(temp_learning_rate); learning_rate_flag = false; // if(params.HPC_output) { // HPC_output << "New Learning Rate: " << temp_learning_rate << "\n"; // HPC_output.flush(); // } } //stuff for perplexity based learning schedule if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) { learning_rate_flag = false; double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,false,params.test_total_words,params.HPC_output,false,""); BZ_CUDA::logger << "Old dev set Perplexity: " << old_perplexity << "\n"; BZ_CUDA::logger << "New dev set Perplexity: " << new_perplexity << "\n"; // if(params.HPC_output) { // HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n"; // HPC_output << "New dev set Perplexity: " << new_perplexity << "\n"; // HPC_output.flush(); // } if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) { temp_learning_rate = temp_learning_rate*params.decrease_factor; model.update_learning_rate(temp_learning_rate); BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } //perplexity is better so output the best model file if((params.best_model && params.best_model_perp > new_perplexity) || 
BZ_CUDA::dump_every_best) { //BZ_CUDA::logger << "Writing model file: "<< params.best_model_file_name <<"\n"; model.dump_best_model(params.best_model_file_name,params.output_weight_file); // if(params.HPC_output) { // HPC_output << "Now outputting the new best model\n"; // HPC_output.flush(); // } params.best_model_perp = new_perplexity; } old_perplexity = new_perplexity; } if(!success) { current_epoch+=1; //stuff for google learning rate schedule if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) { temp_learning_rate = temp_learning_rate/2; BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; model.update_learning_rate(temp_learning_rate); learning_rate_flag = true; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } //stuff for stanford learning rate schedule if(params.stanford_learning_rate && current_epoch>=params.epoch_to_start_halving_full) { temp_learning_rate = temp_learning_rate/2; BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; model.update_learning_rate(temp_learning_rate); learning_rate_flag = true; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } double new_perplexity; if(params.learning_rate_schedule) { new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,false,params.test_total_words,params.HPC_output,false,""); } //stuff for perplexity based learning schedule if(params.learning_rate_schedule) { BZ_CUDA::logger << "Old dev set Perplexity: " << old_perplexity << "\n"; BZ_CUDA::logger << "New dev set Perplexity: " << new_perplexity << "\n"; // if(params.HPC_output) { // HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n"; // HPC_output << "New dev set Perplexity: " << new_perplexity << "\n"; // HPC_output.flush(); // } if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) { temp_learning_rate = temp_learning_rate*params.decrease_factor; model.update_learning_rate(temp_learning_rate); BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } //perplexity is better so output the best model file if( (params.best_model && params.best_model_perp > new_perplexity) || BZ_CUDA::dump_every_best) { //BZ_CUDA::logger << "Now outputting the new best model\n"; model.dump_best_model(params.best_model_file_name,params.output_weight_file); // if(params.HPC_output) { // HPC_output << "Now outputting the new best model\n"; // HPC_output.flush(); // } params.best_model_perp = new_perplexity; } learning_rate_flag = true; old_perplexity = new_perplexity; } if(params.train_perplexity) { model.train_perplexity = model.train_perplexity/::log(2.0); BZ_CUDA::logger << "PData on train set: " << model.train_perplexity << "\n"; BZ_CUDA::logger << "Total target words: " << file_info.total_target_words << "\n"; BZ_CUDA::logger << "Training set perplexity: " << ::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n"; // if(params.HPC_output) { // HPC_output << "Training set perplexity: " << ::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n"; // HPC_output.flush(); // } model.train_perplexity = 0; } total_words=0; if(current_epoch <= 
params.num_epochs) { elapsed_seconds = std::chrono::system_clock::now() - begin_epoch; BZ_CUDA::logger << "Previous Epoch time (minutes): " << (double)elapsed_seconds.count()/60.0 << "\n"; begin_epoch = std::chrono::system_clock::now(); BZ_CUDA::logger << "-----------------------------------" << "\n"; BZ_CUDA::logger << "Starting epoch " << current_epoch << "\n"; BZ_CUDA::logger << "-----------------------------------" << "\n"; // if(params.HPC_output) { // HPC_output << "-----------------------------------" << std::endl; // HPC_output << "Starting epoch " << current_epoch << std::endl; // HPC_output << "-----------------------------------" << std::endl; // HPC_output.flush(); // } } } devSynchAll(); } //Now that training is done, dump the weights devSynchAll(); model.dump_weights(); } /////////////////////////////////Get perplexity on test set//////////////////////////////// if(params.test) { model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,true,params.test_total_words,params.HPC_output,true,params.output_force_decode); //now unint alignments if(model.attent_params.dump_alignments) { input_file_prep input_helper; model.output_alignments.close(); //input_helper.unint_alignments(params.input_weight_file,params.attent_params.tmp_alignment_file,params.attent_params.alignment_file); } } if(params.LM && params.stochastic_generation) { model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature); input_file_prep input_helper; input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false); } ///////////////////////////////////////////decode the model//////////////////////////////////////////// if(params.decode) { //std::cout << "-----------------Starting Decoding----------------\n"; begin_decoding = std::chrono::system_clock::now(); ensemble_factory<precision> ensemble_decode(params.model_names,params.num_hypotheses,params.beam_size, params.min_decoding_ratio, params.penalty, params.longest_sent,params.print_score, params.decoder_output_file,params.gpu_indicies,params.max_decoding_ratio, params.target_vocab_size,params); BZ_CUDA::logger << "-----------------Starting Decoding----------------\n"; ensemble_decode.decode_file(); end_decoding = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding; BZ_CUDA::logger << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n"; //now unintegerize the file input_file_prep input_helper; //use model_names[0] since all models must have the same target vocab mapping and size input_helper.unint_file(params.model_names[0],params.decoder_output_file,params.decoder_final_file,false,true); } //remove the temp directory created if(params.unique_dir!="NULL") { boost::filesystem::path temp_path(params.unique_dir); //boost::filesystem::remove_all(temp_path); } //Compute the final runtime end_total = std::chrono::system_clock::now(); elapsed_seconds = end_total-start_total; BZ_CUDA::logger << "\n\n\n"; BZ_CUDA::logger << "Total Program Runtime: " << (double)elapsed_seconds.count()/60.0 << " minutes" << "\n"; }
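// Minimal sketch (disabled, for illustration only) of how the training-set perplexity printed in
// the training loop above is derived from the accumulated log-likelihood. "sum_log_prob" and
// "total_target_words" are stand-in names for model.train_perplexity (before the /log(2.0) step)
// and file_info.total_target_words; this is a reading of the code, not an authoritative formula.
#if 0
static double training_set_perplexity(double sum_log_prob, double total_target_words) {
	// sum_log_prob: sum of natural-log probabilities of all target words seen this epoch
	double sum_log2_prob = sum_log_prob / std::log(2.0);              // convert ln -> log2
	return std::pow(2.0, -1.0 * sum_log2_prob / total_target_words);  // 2^(-average log2 likelihood)
}
#endif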
b8e5e992190061a1f55020071ad9344feca293f4.cu
#include <iostream> #include <vector> #include <time.h> #include <cmath> #include <chrono> #include <iomanip> #include <fstream> #include <cudnn.h> #include "math_constants.h" //Eigen includes #include <Eigen/Dense> #include <Eigen/Sparse> //Boost #include "boost/program_options.hpp" #include <boost/filesystem/operations.hpp> #include <boost/filesystem/path.hpp> //My own includes #include "add_model_info.h" #include "logger.h" #include "global_params.h" #include "prev_states.h" #include "input_file_prep.h" #include "BZ_CUDA_UTIL.h" #include "conv_char.h" #include "encoder_multi_source.h" #include "bi_encoder.h" #include "attention_layer.h" #include "attention_node.h" #include "attention_combiner.h" #include "decoder_model_wrapper.h" #include "ensemble_factory.h" #include "base_layer.h" #include "NCE.h" #include "gpu_info_struct.h" #include "custom_kernels.h" #include "Hidden_To_Hidden_Layer.h" #include "LSTM_HH.h" #include "model.h" #include "fileHelper.h" #include "fileHelper_source.h" #include "Eigen_Util.h" #include "model.hpp" //#include "base_layer.hpp" #include "LSTM.hpp" #include "softmax.hpp" #include "Input_To_Hidden_Layer.hpp" #include "Hidden_To_Hidden_Layer.hpp" #include "LSTM_HH.hpp" #include "decoder_model_wrapper.hpp" #include "ensemble_factory.hpp" #include "attention_layer.hpp" #include "attention_node.hpp" #include "NCE.hpp" #include "bi_encoder.hpp" #include "encoder_multi_source.hpp" #include "tree_LSTM.hpp" #include "input_file_prep.hpp" #include "attention_combiner.hpp" #include "attention_combiner_node.hpp" #include "conv_char.hpp" #include "highway_network.hpp" //parse the command line from the user void command_line_parse(global_params &params,int argc, char **argv) { //files for keeping the user input //if not s, 1st source, 2nd target, 3rd output weights name //if s, 1st target, 2nd output weights name std::vector<std::string> train_files; //files for force decoding //if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name //if s, 1. target input file 2. neural network file name 3. output file name std::vector<std::string> test_files; //stuff for adaptive learning rate schedule //if not seq , 1st is source dev, 2nd is target dev //if seq 1st is target dev std::vector<std::string> adaptive_learning_rate; //lower and upper range for parameter initialization std::vector<precision> lower_upper_range; //for the kbest flag, 4 arguements must be entered for kbest, 1. number of best paths 2 input file name //3. neural network file name (this is the output file you get after training the neural network)4. output file name std::vector<std::string> kbest_files; //for stoic gen, 1st neural network file, 2nd is output file name std::vector<std::string> stoicgen_files; //truncated softmax std::vector<std::string> trunc_info; //for decoding ratios std::vector<precision> decoding_ratio; //for continuing to train std::vector<std::string> cont_train; //for multi gpu training std::vector<int> gpu_indicies; std::vector<precision> clip_cell_vals; std::vector<double> NCE_vals; //for multisource std::vector<std::string> multi_source; //for char-mt std::vector<int> char_mt_vec; //basic format setup namespace po = boost::program_options; po::options_description desc("Options"); desc.add_options() ("help,h", "Run to get help on how to use the program. This is version 1.0") ("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\ ". 
\nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\ " \nFORMAT (if sequence): <target file name> <neural network output name>") ("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE PASSED IN)\n"\ "FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\ "FORMAT: (if seq): <target file name> <neural network file name>") ("vocab-mapping-file",po::value<std::string> (&params.ensemble_train_file_name),"Train a model with the same integerization mappings as another model. This is needed to do ensemble decoding\n"\ "FORMAT: <neural network file name>") ("train-source-RNN",po::value<bool>(&deniz::train_source_RNN),"train source RNN. DEFAULT: True") ("train-target-RNN",po::value<bool>(&deniz::train_target_RNN),"train target RNN. DEFAULT: True") ("train-source-input-embedding",po::value<bool>(&deniz::train_source_input_embedding),"train source input embeddings. DEFAULT: True") ("train-target-input-embedding",po::value<bool>(&deniz::train_target_input_embedding),"train target input embeddings. DEFAULT: True") ("train-target-output-embedding",po::value<bool>(&deniz::train_target_output_embedding),"train target output embeddings. DEFAULT: True") ("train-attention-target-RNN",po::value<bool>(&deniz::train_attention_target_RNN),"train target attention. DEFAULT: True") ("vocab-mapping-file-multi-source",po::value<std::string> (&params.multi_src_params.ensemble_train_file_name),"specify multi-source mapping for vocab") ("multi-source",po::value<std::vector<std::string>> (&multi_source)->multitoken(),"Specify the second source training file and mapping file for the multi-source model") //("multi-attention",po::value<bool>(&params.multi_src_params.multi_attention),"for attention model with multiple sources\n") ("multi-attention",po::value<bool>(&params.multi_src_params.multi_attention_v2),"Make the multi-source seq-to-seq model use attention\n") //("char-mt",po::value<std::vector<int>> (&char_mt_vec)->multitoken(),"<filter_size> <char_emb_size> <num highway layers> \n") //("add-ht",po::value<bool>(&params.multi_src_params.add_ht),"add hiddenstates for both attention models instead of sending through neural network\n") //("print-norms",po::value<bool>(&BZ_CUDA::print_norms),"Print out norms of all matrices\n") ("lstm-combine",po::value<bool>(&params.multi_src_params.lstm_combine),"For multi source seq-to-seq model, use the child-sum combination method if set to true, else use the basic method. DEFAULT: false\n") ("num-layers,N",po::value<int>(&params.num_layers),"Set the number of LSTM layers you want for your model\n DEFAULT: 1") ("multi-gpu,M",po::value<std::vector<int>> (&gpu_indicies)->multitoken(), "Train the model on multiple gpus.\nFORMAT: <gpu for layer 1> <gpu for layer 2> ... 
<gpu for softmax>\n"\ "DEFAULT: all layers and softmax lie on gpu 0") ("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\ "FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\ "FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>") // ("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\ // "FORMAT: <neural network file name> <output file name>") // ("stoch-gen-len",po::value<int>(&params.sg_length) ,"How many sentences to let stoch-gen run for\n"\ // "FORMAT: <num sentences>\n" // "DEFAULT: 100") //("dump-alignments",po::value<bool>(&params.attent_params.dump_alignments),"Dump the alignments to a file") // ("temperature",po::value<double>(&params.temperature) ,"What should the temperature be for the stoch generation"\ // "FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\ // "DEFAULT: 1") ("sequence,s", "Train model that learns a sequence,such as language modeling. Default model is sequence to sequence model") ("tmp-dir-location",po::value<std::string>(&params.tmp_location),"For all modes in the code, a tmp directiory must be created for data preparation. Specify the location of where you want this to be created. DEFAULT: Current directory") //("bi-directional",po::value<bool>(&params.bi_dir_params.bi_dir),"Have the source sequence be encoded bi-diretionally\n") //("combine-bi-directional",po::value<bool>(&params.bi_dir_params.bi_dir_comb),"send a nonlinear tranformation of the rev and nonrev hidden states from the source encoders to the decoder\n") //("share-embedding",po::value<bool>(&params.bi_dir_params.share_embeddings),"For the bidirectional encoder, share the embeddings") ("dropout,d",po::value<precision>(&params.dropout_rate),"Use dropout and set the dropout rate. This value is the probability of keeping a node. FORMAT: <dropout rate>. DEFAULT: 1.0") ("learning-rate,l",po::value<precision>(&params.learning_rate),"Set the learning rate. DEFAULT: 0.5") ("random-seed",po::value<int>(&params.random_seed_int),"Specify a random seed, instead of the model being seeded with the current time\n") ("longest-sent,L",po::value<int>(&params.longest_sent),"Set the maximum sentence length for training/force-decode/decode. DEFAULT: 100") ("hiddenstate-size,H",po::value<int>(&params.LSTM_size),"Set hiddenstate size. DEFAULT: 100") //("UNK-replacement",po::value<int>(&params.unk_aligned_width),"Set unk replacement to be true and set the wideth\n FORMAT: <alignment width>") // ("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\ // "FORMAT: <shortlist size> <sampled size>") ("UNK-decode",po::value<std::string>(&BZ_CUDA::unk_rep_file_name),"Use unk replacement at decoding time if you have an attention model. Specify a file that the system will output information to. \ This file will then need to be passed to the python script") ("NCE",po::value<int>(&params.num_negative_samples),"Use an NCE loss function, specify the number of noise samples you want (these are shared across the minibatch for speed). 
DEFAULT: uses MLE not NCE") ("NCE-share-samples",po::value<bool>(&params.share_samples),"Share the noise samples across the minibatch when using NCE for a speed increase. DEFAULT: True ") //("NCE-leg-dump",po::value<bool>(&BZ_CUDA::nce_legacy_dump),"Dont use this option") ("NCE-score",po::value<bool>(&BZ_CUDA::nce_score),"Bool for using unnormalized softmax outputs for force decoding. This will make the probabilities not sum to 1, but makes decoding significanly faster. You must have trained the model with NCE for this to work. DEFAULT: false") //("ASHISH-NCE-STATS",po::value<bool>(&BZ_CUDA::dump_NCE_stats),"for ashish") ("attention-model",po::value<bool>(&params.attent_params.attention_model),"Bool for whether you want to train with the attention mode. DEFAULT: False\n") ("attention-width",po::value<int>(&params.attent_params.D),"How many words do you want to look at around the alignment position on one half. DEFAULT: 10\n") ("feed-input",po::value<bool>(&params.attent_params.feed_input),"Bool for wether you want feed input for the attention model. DEFAULT: False\n") ("source-vocab-size,v",po::value<int>(&params.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus") ("target-vocab-size,V",po::value<int>(&params.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus") ("shuffle",po::value<bool>(&params.shuffle),"true if you want to shuffle the train data. DEFAULT: True") ("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\ "FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08") ("number-epochs,n",po::value<int>(&params.num_epochs),"Set number of epochs. DEFAULT: 10") ("matrix-clip-gradients,c",po::value<precision>(&params.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5") //("ind-clip-gradients,i",po::value<precision>(&BZ_CUDA::ind_norm_clip_thres),"CURRENT THIS DOES NOT WORK!!!!!!!!!!!!!!!!!!! \nSet gradient clipping threshold for individual elements\n DEFAULT: 0.1") ("whole-clip-gradients,w",po::value<precision>(&params.norm_clip),"Set gradient clipping threshold for all gradients\n DEFAULT: 5") ("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\ " when the perplexity on your specified dev set increases from the previous half epoch by some constant, so "\ " new_learning_rate = constant*old_learning rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n" "FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\ "FORMAT: (if sequence): <target dev file name>") ("clip-cell",po::value<std::vector<precision>>(&clip_cell_vals)->multitoken(),"Specify the cell clip threshold and the error threshold in backprop.\n FORMAT: <Cell clip threshold> <Error clip Threshold> . Recommended values: <50> <1000>. DEFAULT: not used\n") ("adaptive-decrease-factor,A",po::value<precision>(&params.decrease_factor),"To be used with adaptive-halve-lr"\ " it\n DEFAULT: 0.5") ("fixed-halve-lr",po::value<int> (&params.epoch_to_start_halving),"Halve the learning rate"\ " after a certain epoch, every half epoch afterwards by a specific amount. FORMAT: <epoch number>") ("fixed-halve-lr-full",po::value<int> (&params.epoch_to_start_halving_full),"Halve the learning rate"\ " after a certain epoch, every epoch afterwards by a specific amount. 
FORMAT: <epoch number>") ("minibatch-size,m",po::value<int>(&params.minibatch_size),"Set minibatch size. DEFAULT: 8.") ("screen-print-rate",po::value<int>(&params.screen_print_rate),"Set after how many minibatches you want to print info to the stdout and/or the logfile\n DEFAULT: 5") ("logfile",po::value<std::string>(&params.HPC_output_file_name),"Dump the terminal output to a" \ "file \n FORMAT: <file name>") ("best-model,B",po::value<std::string>(&params.best_model_file_name),"During train have the best model (determined by validation perplexity) be written to a file\nFORMAT: <output file name>") ("save-all-models",po::value<bool>(&BZ_CUDA::dump_every_best),"Save the every model every half epoch") ("decode,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get top decoding outputs using beam search in sequence to sequence model. You can specify more than one model for ensemble decoding\n"\ "FORMAT: <how many outputs> <neural network file 1> <neural network file 2> ... <output file>") ("decode-main-data-files",po::value<std::vector<std::string> > (&params.decode_user_files)->multitoken(),"FORMAT: <data file 1> <data file 2> ... ") ("decode-multi-source-data-files",po::value<std::vector<std::string> > (&params.decode_user_files_additional)->multitoken(),"FORMAT: <multi-source data file 1> <multi-source data file 2> ... ") ("decode-multi-source-vocab-mappings",po::value<std::vector<std::string> > (&params.model_names_multi_src)->multitoken(),"FORMAT: <multi-source vocab mapping 1> <multi-source vocab mapping 2> ... ") ("pre-norm-ensemble",po::value<bool>(&BZ_CUDA::pre_norm),"For --decode, ensemble the models before they are normalized to probabilities") ("beam-size,b",po::value<int>(&params.beam_size),"Set beam size for --decode paths\n DEFAULT: 12") ("penalty,p",po::value<precision>(&params.penalty),"Set penalty for --decode decoding. The value entered"\ " will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0") ("print-score",po::value<bool>(&params.print_score),"Set if you want to print out the unnormalized log prob for each path when using --decode"\ "FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false") ("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length rations when using --decode\n"\ "This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\ " and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ration> <max ratio>\n"\ "DEFAULT: 0.5, 1.5"); // ("tsne-dump",po::value<bool>(&BZ_STATS::tsne_dump),"for dumping multi-source hiddenstates during decoding") // ("Dump-LSTM",po::value<std::string>(&params.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\ // "The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\ // " 4.c_t 5.output gate 6.h_t 7.probabilities"); po::variables_map vm; //kbest should be changed to decode. train-emsemble should be changed to vocab-mapping-file. screen-print-rate should be changed //Declare license for the code. LGPL license or MIT license?. 
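// Illustrative command lines assembled from the FORMAT strings documented above (the executable
// name "ZOPH_RNN" is a placeholder, not something defined in this file; file names are made up):
//   train a seq-to-seq model : ZOPH_RNN -t src.train tgt.train model.nn -N 2 -H 1000 -a src.dev tgt.dev -B best.nn
//   resume training          : ZOPH_RNN -C src.train tgt.train model.nn
//   beam-search decoding     : ZOPH_RNN -k 1 model.nn kbest.out --decode-main-data-files src.test -b 12
//   force-decoding           : ZOPH_RNN -f src.test tgt.test model.nn scores.out
// These are sketches only; every flag shown is defined in the desc.add_options() block above.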
try { po::store(po::parse_command_line(argc, argv, desc), vm); po::notify(vm); std::cout << "------------- Printing options that have currently being set by the user -------------\n"; //now try to loop over all the boost program options for (auto it=vm.begin(); it != vm.end(); it++) { std::cout << "Variable: " << it->first << " Value: "; auto& value = it->second.value(); if (auto v = boost::any_cast<int>(&value)) { std::cout << *v << "\n"; } else if (auto v = boost::any_cast<bool>(&value)) { std::cout << *v << "\n"; } else if (auto v = boost::any_cast<float>(&value)) { std::cout << *v << "\n"; } else if(auto v = boost::any_cast<double>(&value)) { std::cout << *v << "\n"; } else if(auto v = boost::any_cast<std::string>(&value)) { std::cout << *v << "\n"; } else if(std::vector<std::string> *v = boost::any_cast<std::vector<std::string>>(&value)) { std::vector<std::string> vv = *v; for(int i=0; i<vv.size(); i++) { std::cout << " " << vv[i] << " "; } std::cout << "\n"; } else { std::cout << "Not Printable\n"; } } std::cout << "--------------------------------------------------------------------------------------\n\n"; //see if the user specified the help flag if ( vm.count("help") ) { std::cout << "\n------------------------------\n"; std::cout << "This is Barret Zoph's GPU RNN library\n" << "The flags for the command line interface are below\n" << "Look at the README for an indepth tutorial and example commands\n" << "" << "\n"; std::cout << desc << "\n"; exit (EXIT_FAILURE); } if (vm.count("random-seed") ) { params.random_seed = true; } if (vm.count("tmp-dir-location")) { if (params.tmp_location != "") { if (params.tmp_location[params.tmp_location.size()-1]!='/') { params.tmp_location+="/"; } } } if(vm.count("shuffle")) { BZ_CUDA::shuffle_data = params.shuffle; } if(vm.count("logfile")) { params.HPC_output = true; //BZ_CUDA::HPC_output = true; } BZ_CUDA::logger.SetOutputLogger(params.HPC_output_file_name,params.HPC_output); //error checks to be sure only once of these options is set if (vm.count("train") && vm.count("decode")) { BZ_CUDA::logger << "ERROR: you cannot train and get decode at the same time\n"; exit (EXIT_FAILURE); } if (vm.count("train") && vm.count("force-decode")) { BZ_CUDA::logger << "ERROR: you cannot train and force-decode at the same time\n"; exit (EXIT_FAILURE); } if (vm.count("force-decode") && vm.count("decode")) { BZ_CUDA::logger << "ERROR: you cannot force-decode and get decode at the same time\n"; exit (EXIT_FAILURE); } if (!(vm.count("train") || vm.count("force-decode") || vm.count("decode")||vm.count("stoch-gen") || vm.count("cont-train") )) { BZ_CUDA::logger << "ERROR: you must either train,continue training,get decode,stoch generate data or force-decode\n"; exit (EXIT_FAILURE); } if(vm.count("parameter-range")) { BZ_CUDA::lower = lower_upper_range[0]; BZ_CUDA::upper = lower_upper_range[1]; } if(vm.count("cont-train")) { BZ_CUDA::cont_train = true; } else { BZ_CUDA::cont_train = false; } //this is for making sure dev_synch_all only loops over current GPU's specified // if(vm.count("multi-gpu")) { // if(gpu_indicies.size()==0) { // gpu_info::device_numbers.push_back(0); // } // else { // gpu_info::device_numbers = gpu_indicies; // } // } if(vm.count("clip-cell")) { if(clip_cell_vals.size()!=2) { BZ_CUDA::logger << "ERROR: clip-cell must have exactly two arguement\n"; exit (EXIT_FAILURE); } BZ_CUDA::clip_cell = true; BZ_CUDA::cell_clip_threshold = clip_cell_vals[0]; BZ_CUDA::error_clip_threshold = clip_cell_vals[1]; } params.longest_sent+=4; //because it is really 4 
less if(vm.count("UNK-decode")) { BZ_CUDA::unk_replacement = true; BZ_CUDA::unk_rep_file_stream.open(BZ_CUDA::unk_rep_file_name.c_str()); for(int i=0; i<params.beam_size; i++) { BZ_CUDA::viterbi_alignments.push_back(-1); } for(int i=0; i<params.beam_size * params.longest_sent; i++) { BZ_CUDA::alignment_scores.push_back(0); } BZ_CUDA::h_align_indicies = (int*)malloc((2*params.attent_params.D+1)*params.beam_size*sizeof(int)); BZ_CUDA::h_alignment_values = (precision*)malloc((2*params.attent_params.D+1)*params.beam_size*sizeof(precision)); } if(vm.count("char-mt")) { params.char_params.char_cnn = true; params.char_params.filter_size = char_mt_vec[0]; params.char_params.char_emb_size = char_mt_vec[1]; params.char_params.num_highway_layers = char_mt_vec[2]; extract_char_info(params.char_params.longest_word,params.char_params.num_unique_chars_source, params.char_params.num_unique_chars_target,params.source_vocab_size,params.target_vocab_size, params.char_params.char_mapping_file,params.char_params.word_mapping_file); } if(vm.count("train") || vm.count("cont-train")) { if(vm.count("multi-source")) { if(multi_source.size()!=2) { BZ_CUDA::logger << "ERROR only two arguements for the multi-source flag\n"; exit (EXIT_FAILURE); } params.multi_src_params.multi_source = true; params.multi_src_params.file_name = multi_source[0]; params.multi_src_params.source_model_name = multi_source[1]; } //some basic error checks to parameters if(params.learning_rate<=0) { BZ_CUDA::logger << "ERROR: you cannot have a learning rate <=0\n"; exit (EXIT_FAILURE); } if(params.minibatch_size<=0) { BZ_CUDA::logger << "ERROR: you cannot have a minibatch of size <=0\n"; exit (EXIT_FAILURE); } if(params.LSTM_size<=0) { BZ_CUDA::logger << "ERROR: you cannot have a hiddenstate of size <=0\n"; exit (EXIT_FAILURE); } if(params.source_vocab_size<=0) { if(params.source_vocab_size!=-1) { BZ_CUDA::logger << "ERROR: you cannot have a source_vocab_size <=0\n"; exit (EXIT_FAILURE); } } if(params.target_vocab_size<=0) { if(params.target_vocab_size!=-1) { BZ_CUDA::logger << "ERROR: you cannot have a target_vocab_size <=0\n"; exit (EXIT_FAILURE); } } if(params.norm_clip<=0) { BZ_CUDA::logger << "ERROR: you cannot have your norm clip <=0\n"; exit (EXIT_FAILURE); } if(params.num_epochs<=0) { BZ_CUDA::logger << "ERROR: you cannot have num_epochs <=0\n"; exit (EXIT_FAILURE); } // if(vm.count("logfile")) { // params.HPC_output = true; // BZ_CUDA::HPC_output = true; // } if(vm.count("dropout")) { params.dropout = true; if(params.dropout_rate < 0 || params.dropout_rate > 1) { BZ_CUDA::logger << "ERROR: dropout rate must be between 0 and 1\n"; exit (EXIT_FAILURE); } } if(vm.count("matrix-clip-gradients")) { BZ_CUDA::global_clip_flag = false; params.clip_gradient = true; BZ_CUDA::individual_grad_clip = false; } if(vm.count("whole-clip-gradients")) { BZ_CUDA::global_clip_flag = true; params.clip_gradient = false; BZ_CUDA::individual_grad_clip = false; } if(vm.count("ind-clip-gradients")) { BZ_CUDA::global_clip_flag = false; params.clip_gradient = false; BZ_CUDA::individual_grad_clip = true; } if(vm.count("NCE")) { params.NCE = true; params.softmax = false; //BZ_CUDA::print_partition_function = true; } if(vm.count("UNK-replacement")) { params.unk_replace = true; } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n\n"; 
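// Summary of the gradient-clipping switches handled earlier in this block (a reading of the flag
// names and help text, not a spec of the kernels): -c/--matrix-clip-gradients clips each gradient
// matrix separately to norm_clip (params.clip_gradient=true, BZ_CUDA::global_clip_flag=false);
// -w/--whole-clip-gradients clips the global norm over all gradients (global_clip_flag=true,
// clip_gradient=false); -i/--ind-clip-gradients clips each element individually
// (individual_grad_clip=true) and its help text marks it as currently not working. The checks run
// in this order, so a later block overrides the flags set by an earlier one.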
boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); //BZ_CUDA::logger << "Unique_dir: " << params.unique_dir << "\n"; params.train_file_name = params.unique_dir+"/train.txt"; //number of layers //error checking is done when initializing model if(vm.count("multi-gpu")) { params.gpu_indicies = gpu_indicies; } if(vm.count("cont-train")) { //sequence model if(vm.count("sequence")) { if(cont_train.size()!=2) { BZ_CUDA::logger << (int)cont_train.size() << "\n"; BZ_CUDA::logger << "ERROR: two arguements to be supplied to the continue train flag\n"\ " 1. train data file name, 2. neural network file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.attent_params.attention_model = false; params.target_file_name = cont_train[0]; params.input_weight_file = cont_train[1]; params.output_weight_file = cont_train[1]; params.LM = true; params.load_model_train = true; params.load_model_name = params.input_weight_file; input_file_prep input_helper; input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name, params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers); } else { if(cont_train.size()!=3) { BZ_CUDA::logger << "ERROR: three arguements to be supplied to the continue train flag\n"\ " 1. source train data file name 2. target train data file name 3. neural network file name \n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.LM = false; params.source_file_name = cont_train[0]; params.target_file_name = cont_train[1]; params.input_weight_file = cont_train[2]; params.output_weight_file = cont_train[2]; params.load_model_train = true; params.load_model_name = params.input_weight_file; BZ_CUDA::logger << "Load model name: " << params.load_model_name << "\n"; if(params.source_file_name == params.target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } input_file_prep input_helper; if(vm.count("multi-source")) { params.multi_src_params.int_file_name = params.unique_dir + params.multi_src_params.int_file_name; } if(params.char_params.char_cnn) { params.train_file_name = params.char_params.word_train_file; params.test_file_name = params.char_params.word_dev_file; params.output_weight_file = params.char_params.word_mapping_file; } else { input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name, params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size, params.source_vocab_size,params.target_vocab_size,params.num_layers,params.attent_params.attention_model, params.multi_src_params.multi_source,params.multi_src_params.file_name,params.multi_src_params.int_file_name, params.multi_src_params.source_model_name); } } } else { if(vm.count("num-layers")) { if(params.num_layers <=0) { BZ_CUDA::logger << "ERROR: you must have >= 1 layer for your model\n"; exit (EXIT_FAILURE); } } //now create the necessary files if(vm.count("sequence")) { if(train_files.size()!=2) { BZ_CUDA::logger << "ERROR: two arguements to be supplied to the train flag"\ " 1. train data file name, 2. 
neural network output name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.attent_params.attention_model = false; params.LM = true; params.target_file_name = train_files[0]; params.output_weight_file = train_files[1]; input_file_prep input_helper; if(vm.count("vocab-mapping-file")) { params.ensemble_train = true; } //this outputs the train.txt file along with the mappings and first line bool success=true; if(!params.ensemble_train) { success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent, params.target_file_name, params.train_file_name,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers); } else { success = input_helper.prep_files_train_LM_ensemble(params.minibatch_size,params.longest_sent, params.target_file_name, params.train_file_name,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name); } //clean up if error if(!success) { boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } else { //then sequence to sequence model if(train_files.size()!=3) { BZ_CUDA::logger << (int)train_files.size() <<"\n"; BZ_CUDA::logger << "ERROR: three arguements to be supplied to the train flag for the sequence to sequence model\n"\ " 1. source train data file name\n 2. target train data file name \n3. neural network output name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.LM = false; params.source_file_name = train_files[0]; params.target_file_name = train_files[1]; params.output_weight_file = train_files[2]; if(params.source_file_name == params.target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } //see if ensemble training if(vm.count("vocab-mapping-file")) { params.ensemble_train = true; } input_file_prep input_helper; bool success=true; //check if char if(params.char_params.char_cnn) { params.train_file_name = params.char_params.word_train_file; params.test_file_name = params.char_params.word_dev_file; params.output_weight_file = params.char_params.word_mapping_file; } else { if(params.multi_src_params.multi_source) { params.multi_src_params.int_file_name = params.unique_dir + params.multi_src_params.int_file_name; if(params.ensemble_train) { input_helper.prep_files_train_nonLM_multi_source_ensemble(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size, params.num_layers,params.multi_src_params.file_name,params.multi_src_params.int_file_name, params.multi_src_params.source_model_name,params.ensemble_train_file_name,params.multi_src_params.ensemble_train_file_name); } else { input_helper.prep_files_train_nonLM_multi_source(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size, params.num_layers,params.multi_src_params.file_name,params.multi_src_params.int_file_name, params.multi_src_params.source_model_name); } } else 
if(!params.ensemble_train) { success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.unk_replace,params.unk_aligned_width,params.attent_params.attention_model); } else { success = input_helper.prep_files_train_nonLM_ensemble(params.minibatch_size,params.longest_sent, params.source_file_name,params.target_file_name, params.train_file_name,params.source_vocab_size,params.target_vocab_size, params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name,params.attent_params.attention_model); } } //clean up if error if(!success) { boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } } if(vm.count("parameter-range")) { if(lower_upper_range.size()!=2) { BZ_CUDA::logger << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } BZ_CUDA::lower = lower_upper_range[0]; BZ_CUDA::upper = lower_upper_range[1]; if(BZ_CUDA::lower >= BZ_CUDA::upper) { BZ_CUDA::logger << "ERROR: the lower parameter range cannot be greater than the upper range\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } if(vm.count("fixed-halve-lr-full")) { params.stanford_learning_rate = true; } if(vm.count("fixed-halve-lr")) { params.google_learning_rate = true; if(params.epoch_to_start_halving<=0) { BZ_CUDA::logger << "ERROR: cannot halve learning rate until 1st epoch \n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } if(vm.count("adaptive-halve-lr")) { params.learning_rate_schedule = true; if(vm.count("sequence")) { if(adaptive_learning_rate.size()!=1) { BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes one arguement\n1.dev file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.dev_target_file_name = adaptive_learning_rate[0]; params.test_file_name = params.unique_dir + "/validation.txt"; input_file_prep input_helper; if(!params.char_params.char_cnn) { input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name, params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers); } } else { if(adaptive_learning_rate.size()!=2 && !params.multi_src_params.multi_source) { BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes two arguements\n1.source dev file name\n2.target dev file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(adaptive_learning_rate.size()!=3 && params.multi_src_params.multi_source) { BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes three arguements with multi-source\n1.source dev file name\n2.target dev file name\n3.other source dev file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(params.multi_src_params.multi_source) { params.multi_src_params.test_file_name = adaptive_learning_rate[2]; } params.dev_source_file_name = 
adaptive_learning_rate[0]; params.dev_target_file_name = adaptive_learning_rate[1]; params.test_file_name = params.unique_dir + "/validation.txt"; params.multi_src_params.int_file_name_test = params.unique_dir + params.multi_src_params.int_file_name_test; if(params.char_params.char_cnn) { params.train_file_name = params.char_params.word_train_file; params.test_file_name = params.char_params.word_dev_file; } if(params.dev_source_file_name == params.dev_target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } input_file_prep input_helper; if(!params.char_params.char_cnn) { input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name, params.dev_target_file_name,params.test_file_name, params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size,params.num_layers, params.attent_params.attention_model,params.multi_src_params.multi_source,params.multi_src_params.test_file_name,params.multi_src_params.int_file_name_test,params.multi_src_params.source_model_name); } } if(vm.count("best-model")) { params.best_model = true; } } if(vm.count("truncated-softmax")) { params.shortlist_size = std::stoi(trunc_info[0]); params.sampled_size = std::stoi(trunc_info[1]); params.truncated_softmax = true; if(params.shortlist_size + params.sampled_size > params.target_vocab_size) { BZ_CUDA::logger << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } //put in the first line of the model file with the correct info //format: //0: num_layers //1: LSTM_size //2: target_vocab_size //3: source_vocab_size //4: attention_model //5: feed_input //6: multi_source //7: combine_LSTM //8: char_cnn add_model_info(params.num_layers,params.LSTM_size,params.target_vocab_size,params.source_vocab_size,params.attent_params.attention_model,params.attent_params.feed_input,\ params.multi_src_params.multi_source,params.multi_src_params.lstm_combine,params.char_params.char_cnn,params.output_weight_file); params.train= true; params.decode=false; params.test = false; params.stochastic_generation = false; return; } else { //checks here for things that should only be specified during training if(vm.count("train-source-RNN")) { std::cout << "Error train-source-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-RNN")) { std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-source-input-embedding")) { std::cout << "Error train-source-input-embedding should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-input-embedding")) { std::cout << "Error train-target-input-embedding should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-output-embedding")) { std::cout << "Error train-target-output-embedding should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-train-attention-target-RNN")) { std::cout << "Error train-train-attention-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit 
(EXIT_FAILURE); } if(vm.count("vocab-mapping-file-multi-source")) { std::cout << "Error vocab-mapping-file-multi-source should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("multi-source")) { std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("train-target-RNN")) { std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("multi-attention")) { std::cout << "Error multi-attention should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("lstm-combine")) { std::cout << "Error lstm-combine should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("num-layers")) { std::cout << "Error num-layers should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("dropout")) { std::cout << "Error dropout should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("learning-rate")) { std::cout << "Error learning-rate should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("random-seed")) { std::cout << "Error random-seed should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("hiddenstate-size")) { std::cout << "Error hiddenstate-size should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("NCE")) { std::cout << "Error NCE should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("NCE-share-samples")) { std::cout << "Error NCE-share-samples should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("attention-model")) { std::cout << "Error attention-model should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("attention-width")) { std::cout << "Error attention-width should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("feed-input")) { std::cout << "Error feed-input should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("source-vocab-size")) { std::cout << "Error source-vocab-size should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("target-vocab-size")) { std::cout << "Error target-vocab-size should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("parameter-range")) { std::cout << "Error parameter-range should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("number-epochs")) { std::cout << "Error number-epochs should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("matrix-clip-gradients")) { std::cout << "Error matrix-clip-gradients should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("whole-clip-gradients")) { std::cout << "Error whole-clip-gradients should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("adaptive-halve-lr")) { std::cout << "Error adaptive-halve-lr should only be used during 
training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("clip-cell")) { std::cout << "Error clip-cell should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("adaptive-decrease-factor")) { std::cout << "Error adaptive-decrease-factor should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("fixed-halve-lr")) { std::cout << "Error fixed-halve-lr should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("fixed-halve-lr-full")) { std::cout << "Error fixed-halve-lr-full should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("screen-print-rate")) { std::cout << "Error screen-print-rate should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } if(vm.count("best-model")) { std::cout << "Error best-model should only be used during training (-t) or continue-training (-C)\n"; exit (EXIT_FAILURE); } } if(vm.count("decode")) { if (kbest_files.size()<3) { BZ_CUDA::logger << "ERROR: at least 4 arguements must be entered for --decode, 1. number of best outputs\n"\ " 2. neural network file name (this is the output file you get after training the neural network)\n"\ " 3. output file name\n"\ "Additionally more neural network file names can be added to do ensemble decoding\n"; exit (EXIT_FAILURE); } //fill into NULL if the user did not specify anything if(params.decode_user_files_additional.size()==0) { for(int i=0; i<params.decode_user_files.size(); i++) { params.decode_user_files_additional.push_back("NULL"); } } //once again fill in NULL if user did not specify if(params.model_names_multi_src.size()==0) { for(int i=0; i<params.decode_user_files.size(); i++) { params.model_names_multi_src.push_back("NULL"); } } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n"; boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); // if(vm.count("tmp-dir-location")) { // params.unique_dir = params.tmp_location + params.unique_dir; // } //for ensembles for(int i=1; i<kbest_files.size()-1; i++) { params.model_names.push_back(kbest_files[i]); std::string temp_path = params.unique_dir+ "/kbest_tmp_" + std::to_string(i-1); params.decode_temp_files.push_back(temp_path); temp_path = params.unique_dir+ "/kbest_tmp_additional_" + std::to_string(i-1); params.decode_temp_files_additional.push_back(temp_path); } //BZ_CUDA::logger << "params.model_names: " << (int)params.model_names.size() << "\n"; //BZ_CUDA::logger << "decode_user_files: " << (int)params.decode_user_files.size() << "\n"; //BZ_CUDA::logger << "model_names_multi_src: " << (int)params.model_names_multi_src.size() << "\n"; if(params.model_names.size() != params.decode_user_files.size() || params.model_names.size() != params.model_names_multi_src.size()) { BZ_CUDA::logger << "ERROR: the same number of inputs must be specified as models\n"; exit (EXIT_FAILURE); } //params.decode_file_name = params.unique_dir+"/decoder_input.txt"; params.decoder_output_file = params.unique_dir+"/decoder_output.txt"; params.num_hypotheses =std::stoi(kbest_files[0]); //params.decode_tmp_file = kbest_files[1]; //params.input_weight_file = model_names[0]; 
params.decoder_final_file = kbest_files.back(); input_file_prep input_helper; // input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt", // params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size); for(int i=0; i<params.decode_temp_files.size(); i++) { input_helper.integerize_file_kbest(params.model_names[i],params.decode_user_files[i],params.decode_temp_files[i], params.longest_sent,params.target_vocab_size,false,"NULL"); if(params.decode_user_files_additional[i]!= "NULL") { input_helper.integerize_file_kbest(params.model_names[i],params.decode_user_files_additional[i],params.decode_temp_files_additional[i], params.longest_sent,params.target_vocab_size,true,params.model_names_multi_src[i]); } } if(vm.count("multi-gpu")) { if(gpu_indicies.size()!=params.model_names.size()) { BZ_CUDA::logger << "ERROR: for decoding, each model must be specified a gpu\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.gpu_indicies = gpu_indicies; } else { for(int i=0; i<params.model_names.size(); i++) { params.gpu_indicies.push_back(0); } } if(params.beam_size<=0) { BZ_CUDA::logger << "ERROR: beam size cannot be <=0\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(params.penalty<0) { BZ_CUDA::logger << "ERROR: penalty cannot be less than zero\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(vm.count("Dump-LSTM")) { params.dump_LSTM=true; } if(vm.count("dec-ratio")) { if(decoding_ratio.size()!=2) { BZ_CUDA::logger << "Decoding ratio size: " << (int)decoding_ratio.size() << "\n"; BZ_CUDA::logger << decoding_ratio[0] << "\n"; BZ_CUDA::logger << "ERROR: only two inputs for decoding ratio\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.min_decoding_ratio = decoding_ratio[0]; params.max_decoding_ratio = decoding_ratio[1]; if(params.min_decoding_ratio >= params.max_decoding_ratio) { BZ_CUDA::logger << "ERROR: min decoding ratio must be <= max_decoding_ratio\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } } params.train = false; params.decode = true; params.test = false; params.stochastic_generation = false; params.LM = false; return; } if(vm.count("force-decode")) { BZ_CUDA::force_decode = true; if(vm.count("multi-gpu")) { params.gpu_indicies = gpu_indicies; } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n"; boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); // if(vm.count("tmp-dir-location")) { // params.unique_dir = params.tmp_location + params.unique_dir; // } params.test_file_name = params.unique_dir + "/validation.txt"; if(vm.count("sequence")) { if(test_files.size()!=3) { BZ_CUDA::logger << "ERROR: force-decode takes three arguements 1.input file name (input sentences)"\ "2. 
neural network file name 3.output file name \n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.attent_params.attention_model = false; params.target_file_name = test_files[0]; params.input_weight_file = test_files[1]; params.output_force_decode = test_files[2]; params.LM = true; input_file_prep input_helper; input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name, params.longest_sent,params.minibatch_size,false,params.LSTM_size,params.target_vocab_size,params.num_layers); } else { if(test_files.size()!=4) { BZ_CUDA::logger << "ERROR: force-decode takes four arguements: 1. source input file"\ " 2. target input file 3. neural network file name 4. output file name\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } params.LM = false; params.source_file_name = test_files[0]; params.target_file_name = test_files[1]; params.input_weight_file = test_files[2]; params.output_force_decode = test_files[3]; //stuff for attention model alignments params.attent_params.tmp_alignment_file = params.unique_dir + "/alignments.txt"; if(params.source_file_name == params.target_file_name) { BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n"; boost::filesystem::path temp_path(params.unique_dir); boost::filesystem::remove_all(temp_path); exit (EXIT_FAILURE); } if(vm.count("multi-source")) { if(multi_source.size()!=2) { BZ_CUDA::logger << "ERROR only two arguements for the multi-source flag\n"; exit (EXIT_FAILURE); } params.multi_src_params.multi_source = true; params.multi_src_params.test_file_name = multi_source[0]; params.multi_src_params.source_model_name = multi_source[1]; params.multi_src_params.int_file_name_test = params.unique_dir + params.multi_src_params.int_file_name_test; } if(!params.char_params.char_cnn) { input_file_prep input_helper; input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name, params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size, params.source_vocab_size,params.target_vocab_size,params.num_layers,params.attent_params.attention_model, params.multi_src_params.multi_source,params.multi_src_params.test_file_name,params.multi_src_params.int_file_name_test, params.multi_src_params.source_model_name); } else { params.test_file_name = params.char_params.word_dev_file; } params.minibatch_size=1; } std::ifstream tmp_if_stream(params.input_weight_file.c_str()); std::string tmp_str; std::string tmp_word; std::getline(tmp_if_stream,tmp_str); std::istringstream my_ss(tmp_str,std::istringstream::in); std::vector<std::string> tmp_model_params; while(my_ss >> tmp_word) { tmp_model_params.push_back(tmp_word); } if(tmp_model_params.size() != 9) { BZ_CUDA::logger << "Error: the model file is not in the correct format for force-decode\n"; exit (EXIT_FAILURE); } params.num_layers = std::stoi(tmp_model_params[0]); params.LSTM_size = std::stoi(tmp_model_params[1]); params.target_vocab_size = std::stoi(tmp_model_params[2]); params.source_vocab_size = std::stoi(tmp_model_params[3]); params.attent_params.attention_model = std::stoi(tmp_model_params[4]); params.attent_params.feed_input = std::stoi(tmp_model_params[5]); params.multi_src_params.multi_source = std::stoi(tmp_model_params[6]); params.multi_src_params.lstm_combine = std::stoi(tmp_model_params[7]); params.char_params.char_cnn = std::stoi(tmp_model_params[8]); 
params.train= false; params.decode=false; params.test = true; // params.minibatch_size=1; params.stochastic_generation = false; return; } if(vm.count("stoch-gen")) { if(!vm.count("sequence")) { BZ_CUDA::logger << "ERROR: you can only do stoch-gen on the sequence model\n"; exit (EXIT_FAILURE); } if(stoicgen_files.size()!=2) { BZ_CUDA::logger << "ERROR: stoch-gen takes two inputs"\ " 1. neural network file name 2. output file name\n"; exit (EXIT_FAILURE); } boost::filesystem::path unique_path = boost::filesystem::unique_path(); if(vm.count("tmp-dir-location")) { unique_path = boost::filesystem::path(params.tmp_location + unique_path.string()); } BZ_CUDA::logger << "Temp directory being created named: " << unique_path.string() << "\n"; boost::filesystem::create_directories(unique_path); params.unique_dir = unique_path.string(); // if(vm.count("tmp-dir-location")) { // params.unique_dir = params.tmp_location + params.unique_dir; // } params.sg_output_file_temp = params.unique_dir + "/sg.txt"; params.input_weight_file = stoicgen_files[0]; params.sg_output_file = stoicgen_files[1]; std::ifstream weights_file; std::vector<std::string> info; std::string str; std::string word; weights_file.open(params.input_weight_file.c_str()); weights_file.seekg(0, std::ios::beg); std::getline(weights_file, str); //info from first sentence std::istringstream iss(str, std::istringstream::in); while(iss >> word) { info.push_back(word); } weights_file.close(); params.LSTM_size = std::stoi(info[1]); params.target_vocab_size = std::stoi(info[2]); params.LM = true; params.train= false; params.decode = false; params.test = false; params.minibatch_size = 1; params.stochastic_generation = true; return; } } catch(po::error& e) { std::cerr << "ERROR: " << e.what() << std::endl << std::endl; //std::cerr << desc << std::endl; exit (EXIT_FAILURE); } } void myexitfunc(void) { } int main(int argc, char **argv) { //Timing stuff std::chrono::time_point<std::chrono::system_clock> start_total, end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding,begin_epoch; std::chrono::duration<double> elapsed_seconds; start_total = std::chrono::system_clock::now(); //Initializing the model global_params params; //Declare all of the global parameters //create tmp directory if it does not exist already // if( !(boost::filesystem::exists("tmp/"))) { // std::cout << "Creating tmp directory for program\n"; // boost::filesystem::create_directory("tmp/"); // } //atexit(); //this is used to clean up the end of the code //file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information BZ_CUDA::curr_seed = static_cast<unsigned int>(std::time(0)); BZ_CUDA::curr_seed = std::min((unsigned int)100000000,BZ_CUDA::curr_seed);//to prevent overflow //get the command line arguements command_line_parse(params,argc,argv); // if(params.HPC_output) { // std::cout << "Opening logfile: " << params.HPC_output_file_name << "\n"; // HPC_output.open(params.HPC_output_file_name); // } //randomize the seed if(params.random_seed) { BZ_CUDA::gen.seed(static_cast<unsigned int>(params.random_seed_int)); } else { BZ_CUDA::gen.seed(static_cast<unsigned int>(std::time(0))); } neuralMT_model<precision> model; //This is the model printIntroMessage(params); if(!params.decode) { model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size, params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip, 
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax, params.shortlist_size,params.sampled_size,params.LM,params.num_layers,params.gpu_indicies,params.dropout, params.dropout_rate,params.attent_params,params); } if(params.load_model_train) { std::string temp_swap_weights = model.input_weight_file; model.input_weight_file = params.load_model_name; model.load_weights(); model.input_weight_file = temp_swap_weights; } ////////////////////////////////////Train the model////////////////////////////////////// if(params.train) { //info for averaging the speed int curr_batch_num_SPEED = 0; const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever int total_words_batch_SPEED = 0; double total_batch_time_SPEED = 0; //File info for the training file file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax, params.shortlist_size,params.sampled_size,params.char_params,params.char_params.char_train_file); //Initialize the file information //model.initFileInfo(&file_info); params.half_way_count = params.train_total_words/2; if(params.google_learning_rate) { BZ_CUDA::logger << "Number of words at which to start halving the learning rate: " << params.half_way_count << "\n"; // if(params.HPC_output) { // HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n"; // HPC_output.flush(); // } } int current_epoch = 1; BZ_CUDA::logger << "Starting model training\n"; BZ_CUDA::logger << "-----------------------------------" << "\n"; BZ_CUDA::logger << "Starting epoch 1\n"; BZ_CUDA::logger << "-----------------------------------" << "\n"; // if(params.HPC_output) { // HPC_output << "Starting model training\n"; // HPC_output << "Starting epoch 1\n"; // HPC_output.flush(); // } //stuff for learning rate schedule int total_words = 0; precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs double old_perplexity = 0; model.train_perplexity = 0; //set the model perplexity to zero begin_epoch = std::chrono::system_clock::now(); while(current_epoch <= params.num_epochs) { begin_minibatch = std::chrono::system_clock::now(); bool success = file_info.read_minibatch(); if(model.multi_source) { model.src_fh.read_minibatch(); } end_minibatch = std::chrono::system_clock::now(); elapsed_seconds = end_minibatch-begin_minibatch; //std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n"; total_batch_time_SPEED+= elapsed_seconds.count(); begin_minibatch = std::chrono::system_clock::now(); //cudaProfilerStart(); model.initFileInfo(&file_info); model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output, file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output, file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source, file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target, file_info.current_source_length,file_info.current_target_length, file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad, file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices, 
file_info.len_unique_words_trunc_softmax,file_info.h_batch_info,&file_info); //cudaProfilerStop(); //return; // return 0; end_minibatch = std::chrono::system_clock::now(); elapsed_seconds = end_minibatch-begin_minibatch; total_batch_time_SPEED+= elapsed_seconds.count(); total_words_batch_SPEED+=file_info.words_in_minibatch; if(curr_batch_num_SPEED>=thres_batch_num_SPEED) { BZ_CUDA::logger << "Recent batch gradient L2 norm size (if using -w): " << BZ_CUDA::global_norm << "\n"; BZ_CUDA::logger << "Time to compute gradients for previous " << params.screen_print_rate << " minibatches: " << total_batch_time_SPEED/60.0 << " minutes\n"; BZ_CUDA::logger << "Number of words in previous " << params.screen_print_rate << " minibatches: " << total_words_batch_SPEED << "\n"; BZ_CUDA::logger << "Throughput for previous " << params.screen_print_rate << " minibatches: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n"; BZ_CUDA::logger << total_words << " words out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n"; // if(params.HPC_output) { // HPC_output << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n"; // HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n"; // HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n"; // HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n"; // HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n"; // HPC_output.flush(); // } total_words_batch_SPEED = 0; total_batch_time_SPEED = 0; curr_batch_num_SPEED = 0; } curr_batch_num_SPEED++; total_words += file_info.words_in_minibatch; //stuff for google learning rate if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count && learning_rate_flag) { temp_learning_rate = temp_learning_rate/2; BZ_CUDA::logger << "New Learning Rate: " << temp_learning_rate << "\n"; model.update_learning_rate(temp_learning_rate); learning_rate_flag = false; // if(params.HPC_output) { // HPC_output << "New Learning Rate: " << temp_learning_rate << "\n"; // HPC_output.flush(); // } } //stuff for perplexity based learning schedule if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) { learning_rate_flag = false; double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,false,params.test_total_words,params.HPC_output,false,""); BZ_CUDA::logger << "Old dev set Perplexity: " << old_perplexity << "\n"; BZ_CUDA::logger << "New dev set Perplexity: " << new_perplexity << "\n"; // if(params.HPC_output) { // HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n"; // HPC_output << "New dev set Perplexity: " << new_perplexity << "\n"; // HPC_output.flush(); // } if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) { temp_learning_rate = temp_learning_rate*params.decrease_factor; model.update_learning_rate(temp_learning_rate); BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } //perplexity is better so output the best model file if((params.best_model && params.best_model_perp > new_perplexity) || 
BZ_CUDA::dump_every_best) { //BZ_CUDA::logger << "Writing model file: "<< params.best_model_file_name <<"\n"; model.dump_best_model(params.best_model_file_name,params.output_weight_file); // if(params.HPC_output) { // HPC_output << "Now outputting the new best model\n"; // HPC_output.flush(); // } params.best_model_perp = new_perplexity; } old_perplexity = new_perplexity; } if(!success) { current_epoch+=1; //stuff for google learning rate schedule if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) { temp_learning_rate = temp_learning_rate/2; BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; model.update_learning_rate(temp_learning_rate); learning_rate_flag = true; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } //stuff for stanford learning rate schedule if(params.stanford_learning_rate && current_epoch>=params.epoch_to_start_halving_full) { temp_learning_rate = temp_learning_rate/2; BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; model.update_learning_rate(temp_learning_rate); learning_rate_flag = true; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } double new_perplexity; if(params.learning_rate_schedule) { new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,false,params.test_total_words,params.HPC_output,false,""); } //stuff for perplexity based learning schedule if(params.learning_rate_schedule) { BZ_CUDA::logger << "Old dev set Perplexity: " << old_perplexity << "\n"; BZ_CUDA::logger << "New dev set Perplexity: " << new_perplexity << "\n"; // if(params.HPC_output) { // HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n"; // HPC_output << "New dev set Perplexity: " << new_perplexity << "\n"; // HPC_output.flush(); // } if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) { temp_learning_rate = temp_learning_rate*params.decrease_factor; model.update_learning_rate(temp_learning_rate); BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n"; // if(params.HPC_output) { // HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n"; // HPC_output.flush(); // } } //perplexity is better so output the best model file if( (params.best_model && params.best_model_perp > new_perplexity) || BZ_CUDA::dump_every_best) { //BZ_CUDA::logger << "Now outputting the new best model\n"; model.dump_best_model(params.best_model_file_name,params.output_weight_file); // if(params.HPC_output) { // HPC_output << "Now outputting the new best model\n"; // HPC_output.flush(); // } params.best_model_perp = new_perplexity; } learning_rate_flag = true; old_perplexity = new_perplexity; } if(params.train_perplexity) { model.train_perplexity = model.train_perplexity/std::log(2.0); BZ_CUDA::logger << "PData on train set: " << model.train_perplexity << "\n"; BZ_CUDA::logger << "Total target words: " << file_info.total_target_words << "\n"; BZ_CUDA::logger << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n"; // if(params.HPC_output) { // HPC_output << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n"; // HPC_output.flush(); // } model.train_perplexity = 0; } total_words=0; if(current_epoch <= 
params.num_epochs) { elapsed_seconds = std::chrono::system_clock::now() - begin_epoch; BZ_CUDA::logger << "Previous Epoch time (minutes): " << (double)elapsed_seconds.count()/60.0 << "\n"; begin_epoch = std::chrono::system_clock::now(); BZ_CUDA::logger << "-----------------------------------" << "\n"; BZ_CUDA::logger << "Starting epoch " << current_epoch << "\n"; BZ_CUDA::logger << "-----------------------------------" << "\n"; // if(params.HPC_output) { // HPC_output << "-----------------------------------" << std::endl; // HPC_output << "Starting epoch " << current_epoch << std::endl; // HPC_output << "-----------------------------------" << std::endl; // HPC_output.flush(); // } } } devSynchAll(); } //Now that training is done, dump the weights devSynchAll(); model.dump_weights(); } /////////////////////////////////Get perplexity on test set//////////////////////////////// if(params.test) { model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent, params.source_vocab_size,params.target_vocab_size,true,params.test_total_words,params.HPC_output,true,params.output_force_decode); //now unint alignments if(model.attent_params.dump_alignments) { input_file_prep input_helper; model.output_alignments.close(); //input_helper.unint_alignments(params.input_weight_file,params.attent_params.tmp_alignment_file,params.attent_params.alignment_file); } } if(params.LM && params.stochastic_generation) { model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature); input_file_prep input_helper; input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false); } ///////////////////////////////////////////decode the model//////////////////////////////////////////// if(params.decode) { //std::cout << "-----------------Starting Decoding----------------\n"; begin_decoding = std::chrono::system_clock::now(); ensemble_factory<precision> ensemble_decode(params.model_names,params.num_hypotheses,params.beam_size, params.min_decoding_ratio, params.penalty, params.longest_sent,params.print_score, params.decoder_output_file,params.gpu_indicies,params.max_decoding_ratio, params.target_vocab_size,params); BZ_CUDA::logger << "-----------------Starting Decoding----------------\n"; ensemble_decode.decode_file(); end_decoding = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding; BZ_CUDA::logger << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n"; //now unintegerize the file input_file_prep input_helper; //use model_names[0] since all models must have the same target vocab mapping and size input_helper.unint_file(params.model_names[0],params.decoder_output_file,params.decoder_final_file,false,true); } //remove the temp directory created if(params.unique_dir!="NULL") { boost::filesystem::path temp_path(params.unique_dir); //boost::filesystem::remove_all(temp_path); } //Compute the final runtime end_total = std::chrono::system_clock::now(); elapsed_seconds = end_total-start_total; BZ_CUDA::logger << "\n\n\n"; BZ_CUDA::logger << "Total Program Runtime: " << (double)elapsed_seconds.count()/60.0 << " minutes" << "\n"; }
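A note on the perplexity-based learning-rate schedule used in the training loop above: the code divides the accumulated log-likelihood by log(2) and then raises 2 to the negative per-word value, which is just exp(-loglike/words), and it halves (or scales by decrease_factor) the learning rate whenever the new dev perplexity fails to beat the old one by more than the margin. The following is a minimal host-side sketch of that arithmetic only; the function names and numbers are illustrative and not part of the toolkit, which does this through model.get_perplexity() and model.update_learning_rate().

#include <cmath>
#include <cstdio>

// Convert a summed natural-log likelihood over a dev set into perplexity.
// The code above computes pow(2, -(L/ln 2)/W), which equals exp(-L/W).
double perplexity_from_loglike(double sum_log_prob, long long target_words) {
    return std::exp(-sum_log_prob / static_cast<double>(target_words));
}

// Decay the learning rate only if the new perplexity did not improve on the
// old one by more than `margin` (mirrors the check in the loop above).
double next_learning_rate(double lr, double old_ppl, double new_ppl,
                          double margin, double decrease_factor) {
    if (new_ppl + margin >= old_ppl) return lr * decrease_factor;
    return lr;
}

int main() {
    double lr = 0.5;
    double old_ppl = 12.4;
    // Illustrative numbers: -253000 summed log-prob over 100k target words.
    double new_ppl = perplexity_from_loglike(-253000.0, 100000);  // ~12.55
    lr = next_learning_rate(lr, old_ppl, new_ppl, 0.0, 0.5);      // decays to 0.25
    std::printf("new perplexity %.2f, next learning rate %.3f\n", new_ppl, lr);
    return 0;
}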
b71c0117f95339709ca0d4fb3ec15f0236bb48f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Cuda accelerated motion estimation for VP8 libvpx encoder by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini for Italtel and Universita' degli Studi di Milano 2015-2016, Milano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <wchar.h> #include <locale.h> #include "vpx_config.h" #include "cuda/typedef_cuda.h" #include "cuda/me_cuda.h" #include "cuda/me_diamonds.h" #ifdef __cplusplus extern "C" { #endif #if HAVE_CUDA_ENABLED_DEVICE extern __constant__ int offset_16x12[128]; extern __constant__ int offset_16x12_refin[16]; __inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v ) { uint32_t w = 0; asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); return w; } __global__ void me_cuda_split (const uint8_t * __restrict__ const in_frame, const uint8_t * __restrict__ const ref_frame, int const streamID, int const stride, int const width, int const num_MB_width, int_mv * __restrict__ const MVs_g ) { __shared__ int diff[128][32]; __shared__ int minpos[128]; // configurazione di lancio: blocks: 16 x 1 x 1 // threads: 4 x 8 x 1 int TID = threadIdx.y * blockDim.x + threadIdx.x; // Thread Index (0..32) int i; int sad_result; int MBoffset = streamID * 16 + blockIdx.x; int blockX = MBoffset % num_MB_width; // colonna MB int blockY = MBoffset / num_MB_width; // riga MB // Occhio: immagine di riferimento ha cornice (larghezza tot = stride) mentre immagine input no (largh tot = width) int im_offset = 16 * (blockY * stride + blockX) + (2 * threadIdx.y * stride + 4 * threadIdx.x) + 32 * (stride + 1); int im_offset_raw = 16 * (blockY * width + blockX) + (2 * threadIdx.y * width + 4 * threadIdx.x); const uint8_t *refptr = ref_frame + im_offset; const uint8_t *imptr = in_frame + im_offset_raw; int delta_img = (1 * width); // riga successiva int delta_ref = (1 * stride); unsigned int img0 = (uint32_t) ( (*(imptr + 3) << 24) | (*(imptr + 2) << 16) | (*(imptr + 1) << 8) | *(imptr) ); unsigned int img1 = (uint32_t) ( (*(imptr + delta_img + 3) << 24) | (*(imptr + delta_img + 2) << 16) | (*(imptr + delta_img + 1) << 8) | *(imptr + delta_img) ); unsigned int ref0, ref1; for (i = 0; i < 128; i++){ const uint8_t *refp = refptr + offset_16x12[i]; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // Accumulazione for (i = 0; i < 16; i++) { diff[TID][i] += diff[TID][i+16]; diff[TID+32][i] += diff[TID+32][i+16]; diff[TID+64][i] += diff[TID+64][i+16]; diff[TID+96][i] += diff[TID+96][i+16]; } __syncthreads(); for (i = 0; i < 8; i++) { diff[TID][i] += diff[TID][i+8]; diff[TID+32][i] += diff[TID+32][i+8]; diff[TID+64][i] += diff[TID+64][i+8]; diff[TID+96][i] += diff[TID+96][i+8]; } __syncthreads(); for (i = 0; i < 4; i++) { diff[TID][i] += diff[TID][i+4]; diff[TID+32][i] += diff[TID+32][i+4]; diff[TID+64][i] += diff[TID+64][i+4]; diff[TID+96][i] += diff[TID+96][i+4]; } __syncthreads(); diff[TID][0] += (diff[TID][1] + diff[TID][2] + diff[TID][3]); diff[TID+32][0] += (diff[TID+32][1] + diff[TID+32][2] + diff[TID+32][3]); diff[TID+64][0] += (diff[TID+64][1] + diff[TID+64][2] + diff[TID+64][3]); diff[TID+96][0] += (diff[TID+96][1] + 
diff[TID+96][2] + diff[TID+96][3]); __syncthreads(); // Find MINIMUM (and corresponding best MV) of 128 Pts - 32 threads //////////////////////// // minpos[TID] = TID; minpos[TID+32] = TID+32; minpos[TID+64] = TID+64; minpos[TID+96] = TID+96; __syncthreads(); if( diff[TID][0] > diff[TID+32][0] ) { diff[TID][0] = diff[TID+32][0]; minpos[TID] = minpos[TID+32]; } if( diff[TID][0] > diff[TID+64][0] ) { diff[TID][0] = diff[TID+64][0]; minpos[TID] = minpos[TID+64]; } if( diff[TID][0] > diff[TID+96][0] ) { diff[TID][0] = diff[TID+96][0]; minpos[TID] = minpos[TID+96]; } __syncthreads(); if( TID < 16 ) // 16 threads if( diff[TID][0] > diff[TID + 16][0] ) { diff[TID][0] = diff[TID + 16][0]; minpos[TID] = minpos[TID + 16]; } __syncthreads(); if( TID < 8 ) if( diff[TID][0] > diff[TID + 8][0] ) { diff[TID][0] = diff[TID + 8][0]; minpos[TID] = minpos[TID + 8]; } __syncthreads(); if( TID < 4 ) if( diff[TID][0] > diff[TID + 4][0] ) { diff[TID][0] = diff[TID + 4][0]; minpos[TID] = minpos[TID + 4]; } __syncthreads(); if( TID < 2 ) if( diff[TID][0] > diff[TID + 2][0] ) { diff[TID][0] = diff[TID + 2][0]; minpos[TID] = minpos[TID + 2]; } __syncthreads(); if( TID == 0 ) { if( diff[0][0] > diff[1][0] ) { diff[0][0] = diff[1][0]; minpos[0] = minpos[1]; } MVs_g[MBoffset].as_mv = MV_16x12_lookup[ minpos[0] ]; } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////// // STEP 2: pixel-scale Motion Vector Search // Update RefPointer to the best motion vector refptr += offset_16x12[ minpos[0] ] ; // Compute pixel differences: ////////// // for (i = 0; i < 16; i++){ const uint8_t *refp = refptr + offset_16x12_refin[i]; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // accumulazione su 32 thread // (16 calcolati inutilmente) for (i=0; i<16; i++) diff[TID][i] += diff[TID][i+16]; __syncthreads(); for (i=0; i<8; i++) diff[TID][i] += diff[TID][i+8]; __syncthreads(); for (i=0; i<4; i++) diff[TID][i] += diff[TID][i+4]; __syncthreads(); diff[TID][0] += (diff[TID][1] + diff[TID][2] + diff[TID][3]); __syncthreads(); // Find MINIMUM (and corresponding best MV) of 16 Pts - 16 threads //////////////////////// // minpos[TID] = TID; __syncthreads(); if( TID < 8 ) // 8 threads if( diff[TID][0] > diff[TID+8][0] ) { diff[TID][0] = diff[TID+8][0]; minpos[TID] = minpos[TID+8]; } __syncthreads(); if( TID < 4 ) // 4 threads if( diff[TID][0] > diff[TID + 4][0] ) { diff[TID][0] = diff[TID + 4][0]; minpos[TID] = minpos[TID + 4]; } __syncthreads(); if( TID < 2 ) // 2 threads if( diff[TID][0] > diff[TID + 2][0] ) { diff[TID][0] = diff[TID + 2][0]; minpos[TID] = minpos[TID + 2]; } __syncthreads(); if( TID == 0 ) // Only thread 0 { if( diff[0][0] > diff[1][0] ) { diff[0][0] = diff[1][0]; minpos[0] = minpos[1]; } MVs_g[MBoffset].as_mv.row += MV_lookup_refin_fast[ minpos[0] ].row; // Added to the previous MV MVs_g[MBoffset].as_mv.col += MV_lookup_refin_fast[ minpos[0] ].col; // Added to the previous MV MVs_g[MBoffset].as_mv.row <<= 3; MVs_g[MBoffset].as_mv.col <<= 3; } } inline void me_kernel_launch_split( VP8_COMMON * const common, const uint8_t * const in_frame, const uint8_t * const ref_frame, int const streamID, int_mv * const MVs ) { #if CUDA_VERBOSE float elapsedTime; hipEvent_t start, stop; 
CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start)); #endif hipLaunchKernelGGL(( me_cuda_split) , dim3(common->GPU.gridDim), dim3(common->GPU.blockDim), 0, common->GPU.streams.frame[streamID] , in_frame, ref_frame, streamID, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, MVs ); #if CUDA_VERBOSE CHECK(hipEventRecord(stop)); CHECK(hipEventSynchronize(stop)); CHECK(hipEventElapsedTime(&elapsedTime, start, stop)); printf("\n[GPU] ME elapsed time streams[%d]: %.4f ms\n",streamID,elapsedTime); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); add_STATS((double)elapsedTime,0); #endif } void me_cuda_launch_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { int MV_size_16 = 16*sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) in onyx_if.c for (int s = 0; s < cm->GPU.num_mb16th; s++) { int offset = 16*s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e hipMemcpyAsync andava a leggere oltre i limiti degli array if (offset + 16 > cm->gpu_frame.num_mv) { MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv ); } if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0] ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1] ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2] ); CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } void me_cuda_launch_not_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { int MV_size_16 = 16*sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) 
in onyx_if.c for (int s = 0; s < cm->GPU.num_mb16th; s++) { int offset = 16*s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e hipMemcpyAsync andava a leggere oltre i limiti degli array if (offset + 16 > cm->gpu_frame.num_mv) { MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv ); } if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0] ); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1] ); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2] ); } if (ref_frame_flags & GPUFLAG_LAST_FRAME) { CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } #endif /* HAVE_CUDA_ENABLED_DEVICE */ #ifdef __cplusplus } #endif
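The systematic difference between this hipify-generated file and its CUDA original (the .cu twin that follows) is the launch site: the triple-chevron launch becomes the hipLaunchKernelGGL macro with the same grid, block, dynamic shared-memory and stream arguments, followed by the kernel arguments. Below is a small, self-contained sketch of that mapping with a deliberately trivial kernel; it is illustrative only and not part of the encoder.

#include <cuda_runtime.h>
#include <cstdio>

// Trivial kernel, used only to illustrate how hipify rewrites the launch site.
__global__ void scale_kernel(float *data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 1 << 20;
    float *d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // CUDA launch (the form used in the .cu twin of this file):
    scale_kernel<<<grid, block, 0, stream>>>(d_data, 2.0f, n);

    // The hipify-generated equivalent, as in me_kernel_launch_split above, would be:
    //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_data, 2.0f, n);
    // i.e. kernel name, grid, block, shared-memory bytes, stream, then the arguments.

    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
    cudaFree(d_data);
    printf("done\n");
    return 0;
}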
b71c0117f95339709ca0d4fb3ec15f0236bb48f6.cu
/* Cuda accelerated motion estimation for VP8 libvpx encoder by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini for Italtel and Universita' degli Studi di Milano 2015-2016, Milano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <wchar.h> #include <locale.h> #include "vpx_config.h" #include "cuda/typedef_cuda.h" #include "cuda/me_cuda.h" #include "cuda/me_diamonds.h" #ifdef __cplusplus extern "C" { #endif #if HAVE_CUDA_ENABLED_DEVICE extern __constant__ int offset_16x12[128]; extern __constant__ int offset_16x12_refin[16]; __inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v ) { uint32_t w = 0; asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(w) : "r"(u), "r"(v), "r"(w)); return w; } __global__ void me_cuda_split (const uint8_t * __restrict__ const in_frame, const uint8_t * __restrict__ const ref_frame, int const streamID, int const stride, int const width, int const num_MB_width, int_mv * __restrict__ const MVs_g ) { __shared__ int diff[128][32]; __shared__ int minpos[128]; // configurazione di lancio: blocks: 16 x 1 x 1 // threads: 4 x 8 x 1 int TID = threadIdx.y * blockDim.x + threadIdx.x; // Thread Index (0..32) int i; int sad_result; int MBoffset = streamID * 16 + blockIdx.x; int blockX = MBoffset % num_MB_width; // colonna MB int blockY = MBoffset / num_MB_width; // riga MB // Occhio: immagine di riferimento ha cornice (larghezza tot = stride) mentre immagine input no (largh tot = width) int im_offset = 16 * (blockY * stride + blockX) + (2 * threadIdx.y * stride + 4 * threadIdx.x) + 32 * (stride + 1); int im_offset_raw = 16 * (blockY * width + blockX) + (2 * threadIdx.y * width + 4 * threadIdx.x); const uint8_t *refptr = ref_frame + im_offset; const uint8_t *imptr = in_frame + im_offset_raw; int delta_img = (1 * width); // riga successiva int delta_ref = (1 * stride); unsigned int img0 = (uint32_t) ( (*(imptr + 3) << 24) | (*(imptr + 2) << 16) | (*(imptr + 1) << 8) | *(imptr) ); unsigned int img1 = (uint32_t) ( (*(imptr + delta_img + 3) << 24) | (*(imptr + delta_img + 2) << 16) | (*(imptr + delta_img + 1) << 8) | *(imptr + delta_img) ); unsigned int ref0, ref1; for (i = 0; i < 128; i++){ const uint8_t *refp = refptr + offset_16x12[i]; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // Accumulazione for (i = 0; i < 16; i++) { diff[TID][i] += diff[TID][i+16]; diff[TID+32][i] += diff[TID+32][i+16]; diff[TID+64][i] += diff[TID+64][i+16]; diff[TID+96][i] += diff[TID+96][i+16]; } __syncthreads(); for (i = 0; i < 8; i++) { diff[TID][i] += diff[TID][i+8]; diff[TID+32][i] += diff[TID+32][i+8]; diff[TID+64][i] += diff[TID+64][i+8]; diff[TID+96][i] += diff[TID+96][i+8]; } __syncthreads(); for (i = 0; i < 4; i++) { diff[TID][i] += diff[TID][i+4]; diff[TID+32][i] += diff[TID+32][i+4]; diff[TID+64][i] += diff[TID+64][i+4]; diff[TID+96][i] += diff[TID+96][i+4]; } __syncthreads(); diff[TID][0] += (diff[TID][1] + diff[TID][2] + diff[TID][3]); diff[TID+32][0] += (diff[TID+32][1] + diff[TID+32][2] + diff[TID+32][3]); diff[TID+64][0] += (diff[TID+64][1] + diff[TID+64][2] + diff[TID+64][3]); diff[TID+96][0] += (diff[TID+96][1] + diff[TID+96][2] + diff[TID+96][3]); __syncthreads(); // Find MINIMUM (and corresponding best MV) 
of 128 Pts - 32 threads //////////////////////// // minpos[TID] = TID; minpos[TID+32] = TID+32; minpos[TID+64] = TID+64; minpos[TID+96] = TID+96; __syncthreads(); if( diff[TID][0] > diff[TID+32][0] ) { diff[TID][0] = diff[TID+32][0]; minpos[TID] = minpos[TID+32]; } if( diff[TID][0] > diff[TID+64][0] ) { diff[TID][0] = diff[TID+64][0]; minpos[TID] = minpos[TID+64]; } if( diff[TID][0] > diff[TID+96][0] ) { diff[TID][0] = diff[TID+96][0]; minpos[TID] = minpos[TID+96]; } __syncthreads(); if( TID < 16 ) // 16 threads if( diff[TID][0] > diff[TID + 16][0] ) { diff[TID][0] = diff[TID + 16][0]; minpos[TID] = minpos[TID + 16]; } __syncthreads(); if( TID < 8 ) if( diff[TID][0] > diff[TID + 8][0] ) { diff[TID][0] = diff[TID + 8][0]; minpos[TID] = minpos[TID + 8]; } __syncthreads(); if( TID < 4 ) if( diff[TID][0] > diff[TID + 4][0] ) { diff[TID][0] = diff[TID + 4][0]; minpos[TID] = minpos[TID + 4]; } __syncthreads(); if( TID < 2 ) if( diff[TID][0] > diff[TID + 2][0] ) { diff[TID][0] = diff[TID + 2][0]; minpos[TID] = minpos[TID + 2]; } __syncthreads(); if( TID == 0 ) { if( diff[0][0] > diff[1][0] ) { diff[0][0] = diff[1][0]; minpos[0] = minpos[1]; } MVs_g[MBoffset].as_mv = MV_16x12_lookup[ minpos[0] ]; } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////////// // STEP 2: pixel-scale Motion Vector Search // Update RefPointer to the best motion vector refptr += offset_16x12[ minpos[0] ] ; // Compute pixel differences: ////////// // for (i = 0; i < 16; i++){ const uint8_t *refp = refptr + offset_16x12_refin[i]; ref0 = (uint32_t)( *(refp + 3) << 24 | *(refp + 2) << 16 | *(refp + 1) << 8 | *(refp) ); ref1 = (uint32_t)( *(refp + delta_ref + 3) << 24 | *(refp + delta_ref + 2) << 16 | *(refp + delta_ref + 1) << 8 | *(refp + delta_ref) ); sad_result = __vabsdiff4( img0, ref0 ); sad_result += __vabsdiff4( img1, ref1 ); diff[i][TID] = sad_result; } __syncthreads(); // accumulazione su 32 thread // (16 calcolati inutilmente) for (i=0; i<16; i++) diff[TID][i] += diff[TID][i+16]; __syncthreads(); for (i=0; i<8; i++) diff[TID][i] += diff[TID][i+8]; __syncthreads(); for (i=0; i<4; i++) diff[TID][i] += diff[TID][i+4]; __syncthreads(); diff[TID][0] += (diff[TID][1] + diff[TID][2] + diff[TID][3]); __syncthreads(); // Find MINIMUM (and corresponding best MV) of 16 Pts - 16 threads //////////////////////// // minpos[TID] = TID; __syncthreads(); if( TID < 8 ) // 8 threads if( diff[TID][0] > diff[TID+8][0] ) { diff[TID][0] = diff[TID+8][0]; minpos[TID] = minpos[TID+8]; } __syncthreads(); if( TID < 4 ) // 4 threads if( diff[TID][0] > diff[TID + 4][0] ) { diff[TID][0] = diff[TID + 4][0]; minpos[TID] = minpos[TID + 4]; } __syncthreads(); if( TID < 2 ) // 2 threads if( diff[TID][0] > diff[TID + 2][0] ) { diff[TID][0] = diff[TID + 2][0]; minpos[TID] = minpos[TID + 2]; } __syncthreads(); if( TID == 0 ) // Only thread 0 { if( diff[0][0] > diff[1][0] ) { diff[0][0] = diff[1][0]; minpos[0] = minpos[1]; } MVs_g[MBoffset].as_mv.row += MV_lookup_refin_fast[ minpos[0] ].row; // Added to the previous MV MVs_g[MBoffset].as_mv.col += MV_lookup_refin_fast[ minpos[0] ].col; // Added to the previous MV MVs_g[MBoffset].as_mv.row <<= 3; MVs_g[MBoffset].as_mv.col <<= 3; } } inline void me_kernel_launch_split( VP8_COMMON * const common, const uint8_t * const in_frame, const uint8_t * const ref_frame, int const streamID, int_mv * const MVs ) { #if CUDA_VERBOSE float elapsedTime; cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start)); 
#endif me_cuda_split <<< common->GPU.gridDim, common->GPU.blockDim, 0, common->GPU.streams.frame[streamID] >>> (in_frame, ref_frame, streamID, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, MVs ); #if CUDA_VERBOSE CHECK(cudaEventRecord(stop)); CHECK(cudaEventSynchronize(stop)); CHECK(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("\n[GPU] ME elapsed time streams[%d]: %.4f ms\n",streamID,elapsedTime); CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); add_STATS((double)elapsedTime,0); #endif } void me_cuda_launch_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { int MV_size_16 = 16*sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) in onyx_if.c for (int s = 0; s < cm->GPU.num_mb16th; s++) { int offset = 16*s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e cudaMemcpyAsync andava a leggere oltre i limiti degli array if (offset + 16 > cm->gpu_frame.num_mv) { MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv ); } if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0] ); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1] ); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2] ); CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } void me_cuda_launch_not_interleaved_split( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) { int MV_size_16 = 16*sizeof(int_mv); // for printing informations about reference frame flags and thei usage, I left a commented prinft at line 3625 // at the beginning of encode_frame_to_data_rate(..) 
in onyx_if.c for (int s = 0; s < cm->GPU.num_mb16th; s++) { int offset = 16*s; // bugfix per immagini il cui n di mb non e' divisibile per 16 // prima venivano lanciati troppi processi e cudaMemcpyAsync andava a leggere oltre i limiti degli array if (offset + 16 > cm->gpu_frame.num_mv) { MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv ); } if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0] ); } // Se ref_frame_flags indica la presenza di un gold e se il flag del fb puntato da gld_fb_idx indica che e' gold, allora... if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1] ); } // Se ref_frame_flags indica la presenza di un altref e se il flag del fb puntato da alt_fb_idx indica che e' altref, allora... if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { me_kernel_launch_split(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2] ); } if (ref_frame_flags & GPUFLAG_LAST_FRAME) { CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) { CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) { CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s])); } } } #endif /* HAVE_CUDA_ENABLED_DEVICE */ #ifdef __cplusplus } #endif
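The SAD building block of the kernel above is the inline-PTX vabsdiff4 wrapper: with the .add modifier it returns the sum of the four per-byte absolute differences plus the accumulator, which matches what the __vsadu4 SIMD intrinsic computes. The stand-alone sketch below reuses that wrapper to compute the SAD of 8 packed bytes (two 32-bit words per operand, as the motion-estimation kernel does for img0/img1 vs ref0/ref1) and checks it against a scalar reference; buffer sizes and names are illustrative.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdint>
#include <cstdlib>

// Same inline-PTX wrapper as in me_cuda_split above.
__inline__ __device__ uint32_t vabsdiff4_add(uint32_t u, uint32_t v) {
    uint32_t w = 0;
    asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;"
                 : "=r"(w) : "r"(u), "r"(v), "r"(w));
    return w;
}

// SAD of 8 consecutive bytes, packed as two 32-bit words per operand.
__global__ void sad8_kernel(const uint8_t *a, const uint8_t *b, uint32_t *out) {
    const uint32_t *a32 = reinterpret_cast<const uint32_t *>(a);  // cudaMalloc pointers are aligned
    const uint32_t *b32 = reinterpret_cast<const uint32_t *>(b);
    uint32_t sad = vabsdiff4_add(a32[0], b32[0]);
    sad += vabsdiff4_add(a32[1], b32[1]);
    *out = sad;
}

int main() {
    uint8_t h_a[8], h_b[8];
    uint32_t ref = 0;
    for (int i = 0; i < 8; ++i) {
        h_a[i] = static_cast<uint8_t>(rand() & 0xFF);
        h_b[i] = static_cast<uint8_t>(rand() & 0xFF);
        ref += static_cast<uint32_t>(abs((int)h_a[i] - (int)h_b[i]));  // scalar reference
    }
    uint8_t *d_a, *d_b; uint32_t *d_out, h_out;
    cudaMalloc(&d_a, 8); cudaMalloc(&d_b, 8); cudaMalloc(&d_out, sizeof(uint32_t));
    cudaMemcpy(d_a, h_a, 8, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, 8, cudaMemcpyHostToDevice);
    sad8_kernel<<<1, 1>>>(d_a, d_b, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(uint32_t), cudaMemcpyDeviceToHost);
    printf("GPU SAD = %u, CPU SAD = %u\n", h_out, ref);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_out);
    return 0;
}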
788022758fab355627ca67ea0bb386322db9da36.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/cuml.hpp> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <cuml/tsa/arima_common.h> #include <random/rng.h> #include <cuml/tsa/batched_arima.hpp> #include "benchmark.cuh" namespace ML { namespace Bench { namespace Arima { struct ArimaParams { TimeSeriesParams data; ARIMAOrder order; }; template <typename DataT> class ArimaLoglikelihood : public TsFixtureRandom<DataT> { public: ArimaLoglikelihood(const std::string& name, const ArimaParams& p) : TsFixtureRandom<DataT>(p.data), order(p.order) { this->SetName(name.c_str()); } // Note: public function because of the __device__ lambda void runBenchmark(::benchmark::State& state) override { auto& handle = *this->handle; auto stream = handle.getStream(); auto counting = thrust::make_counting_iterator(0); // Generate random parameters int N = order.complexity(); MLCommon::Random::Rng gpu_gen(this->params.seed, MLCommon::Random::GenPhilox); gpu_gen.uniform(param, N * this->params.batch_size, -1.0, 1.0, stream); // Set sigma2 parameters to 1.0 DataT* x = param; // copy the object attribute for thrust thrust::for_each(thrust::hip::par.on(stream), counting, counting + this->params.batch_size, [=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; }); CUDA_CHECK(hipStreamSynchronize(stream)); // Benchmark loop for (auto _ : state) { CudaEventTimer timer(handle, state, true, stream); // Evaluate log-likelihood batched_loglike(handle, this->data.X, this->params.batch_size, this->params.n_obs, order, param, loglike, residual, true, false); } } void allocateBuffers(const ::benchmark::State& state) { auto& handle = *this->handle; auto stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); // Buffer for the model parameters param = (DataT*)allocator->allocate( order.complexity() * this->params.batch_size * sizeof(DataT), stream); // Buffers for the log-likelihood and residuals loglike = (DataT*)allocator->allocate( this->params.batch_size * sizeof(DataT), stream); residual = (DataT*)allocator->allocate( this->params.batch_size * (this->params.n_obs - order.lost_in_diff()) * sizeof(DataT), stream); } void deallocateBuffers(const ::benchmark::State& state) { auto& handle = *this->handle; auto stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); allocator->deallocate( param, order.complexity() * this->params.batch_size * sizeof(DataT), stream); allocator->deallocate(loglike, this->params.batch_size * sizeof(DataT), stream); allocator->deallocate(residual, this->params.batch_size * (this->params.n_obs - order.lost_in_diff()) * sizeof(DataT), stream); } protected: ARIMAOrder order; DataT* param; DataT* loglike; DataT* residual; }; std::vector<ArimaParams> getInputs() { struct std::vector<ArimaParams> out; ArimaParams p; p.data.seed = 12345ULL; std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 
0, 0}, {1, 1, 1, 1, 1, 1, 4, 0}, {1, 1, 1, 1, 1, 1, 12, 0}, {1, 1, 1, 1, 1, 1, 24, 0}, {1, 1, 1, 1, 1, 1, 52, 0}}; std::vector<int> list_batch_size = {10, 100, 1000, 10000}; std::vector<int> list_n_obs = {200, 500, 1000}; for (auto& order : list_order) { for (auto& batch_size : list_batch_size) { for (auto& n_obs : list_n_obs) { p.order = order; p.data.batch_size = batch_size; p.data.n_obs = n_obs; out.push_back(p); } } } return out; } CUML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima", getInputs()); } // namespace Arima } // namespace Bench } // namespace ML
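The benchmark above initializes the sigma2 slot of every batch member with a thrust::for_each over a counting iterator, run on the handle's stream with a __device__ lambda that captures a raw pointer (not `this`). The sketch below isolates that pattern with illustrative sizes; it uses the CUDA execution policy, matching the .cu twin that follows, whereas the HIP build above uses thrust::hip::par.on(stream). The real benchmark derives N from order.complexity() and writes into its `param` buffer.

#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int batch_size = 4;
    const int N = 3;                        // parameters per batch member (illustrative)
    double *param = nullptr;
    cudaMalloc(&param, batch_size * N * sizeof(double));
    cudaMemset(param, 0, batch_size * N * sizeof(double));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    auto counting = thrust::make_counting_iterator(0);
    double *x = param;                      // capture a raw pointer in the lambda
    // Device lambda: requires nvcc --extended-lambda (or --expt-extended-lambda).
    thrust::for_each(thrust::cuda::par.on(stream), counting, counting + batch_size,
                     [=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });
    cudaStreamSynchronize(stream);

    double h[batch_size * N];
    cudaMemcpy(h, param, sizeof(h), cudaMemcpyDeviceToHost);
    for (int i = 0; i < batch_size * N; ++i) printf("%.0f ", h[i]);  // 0 0 1 0 0 1 ...
    printf("\n");
    cudaFree(param);
    cudaStreamDestroy(stream);
    return 0;
}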
788022758fab355627ca67ea0bb386322db9da36.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/cuml.hpp> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <cuml/tsa/arima_common.h> #include <random/rng.h> #include <cuml/tsa/batched_arima.hpp> #include "benchmark.cuh" namespace ML { namespace Bench { namespace Arima { struct ArimaParams { TimeSeriesParams data; ARIMAOrder order; }; template <typename DataT> class ArimaLoglikelihood : public TsFixtureRandom<DataT> { public: ArimaLoglikelihood(const std::string& name, const ArimaParams& p) : TsFixtureRandom<DataT>(p.data), order(p.order) { this->SetName(name.c_str()); } // Note: public function because of the __device__ lambda void runBenchmark(::benchmark::State& state) override { auto& handle = *this->handle; auto stream = handle.getStream(); auto counting = thrust::make_counting_iterator(0); // Generate random parameters int N = order.complexity(); MLCommon::Random::Rng gpu_gen(this->params.seed, MLCommon::Random::GenPhilox); gpu_gen.uniform(param, N * this->params.batch_size, -1.0, 1.0, stream); // Set sigma2 parameters to 1.0 DataT* x = param; // copy the object attribute for thrust thrust::for_each(thrust::cuda::par.on(stream), counting, counting + this->params.batch_size, [=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; }); CUDA_CHECK(cudaStreamSynchronize(stream)); // Benchmark loop for (auto _ : state) { CudaEventTimer timer(handle, state, true, stream); // Evaluate log-likelihood batched_loglike(handle, this->data.X, this->params.batch_size, this->params.n_obs, order, param, loglike, residual, true, false); } } void allocateBuffers(const ::benchmark::State& state) { auto& handle = *this->handle; auto stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); // Buffer for the model parameters param = (DataT*)allocator->allocate( order.complexity() * this->params.batch_size * sizeof(DataT), stream); // Buffers for the log-likelihood and residuals loglike = (DataT*)allocator->allocate( this->params.batch_size * sizeof(DataT), stream); residual = (DataT*)allocator->allocate( this->params.batch_size * (this->params.n_obs - order.lost_in_diff()) * sizeof(DataT), stream); } void deallocateBuffers(const ::benchmark::State& state) { auto& handle = *this->handle; auto stream = handle.getStream(); auto allocator = handle.getDeviceAllocator(); allocator->deallocate( param, order.complexity() * this->params.batch_size * sizeof(DataT), stream); allocator->deallocate(loglike, this->params.batch_size * sizeof(DataT), stream); allocator->deallocate(residual, this->params.batch_size * (this->params.n_obs - order.lost_in_diff()) * sizeof(DataT), stream); } protected: ARIMAOrder order; DataT* param; DataT* loglike; DataT* residual; }; std::vector<ArimaParams> getInputs() { struct std::vector<ArimaParams> out; ArimaParams p; p.data.seed = 12345ULL; std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0}, {1, 1, 1, 1, 1, 1, 4, 0}, {1, 1, 1, 1, 1, 1, 12, 
0}, {1, 1, 1, 1, 1, 1, 24, 0}, {1, 1, 1, 1, 1, 1, 52, 0}}; std::vector<int> list_batch_size = {10, 100, 1000, 10000}; std::vector<int> list_n_obs = {200, 500, 1000}; for (auto& order : list_order) { for (auto& batch_size : list_batch_size) { for (auto& n_obs : list_n_obs) { p.order = order; p.data.batch_size = batch_size; p.data.n_obs = n_obs; out.push_back(p); } } } return out; } CUML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima", getInputs()); } // namespace Arima } // namespace Bench } // namespace ML
d528ef4f61f9b55a4e7de4b84e46479041cc3a39.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "randomforest.h" namespace ML { /** * @brief Construct RF_metrics. * @param[in] cfg_accuracy: accuracy. */ RF_metrics::RF_metrics(float cfg_accuracy) : accuracy(cfg_accuracy) {}; /** * @brief Print accuracy metric. */ void RF_metrics::print() { std::cout << "Accuracy: " << accuracy << std::endl; } /** * @brief Update labels so they are unique from 0 to n_unique_labels values. Create/update an old label to new label map per random forest. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in,out] labels_map: map of old label values to new ones. * @param[in] verbose: debugging flag. */ void preprocess_labels(int n_rows, std::vector<int> & labels, std::map<int, int> & labels_map, bool verbose) { std::pair<std::map<int, int>::iterator, bool> ret; int n_unique_labels = 0; if (verbose) std::cout << "Preprocessing labels\n"; for (int i = 0; i < n_rows; i++) { ret = labels_map.insert(std::pair<int, int>(labels[i], n_unique_labels)); if (ret.second) { n_unique_labels += 1; } if (verbose) std::cout << "Mapping " << labels[i] << " to "; labels[i] = ret.first->second; //Update labels **IN-PLACE** if (verbose) std::cout << labels[i] << std::endl; } if (verbose) std::cout << "Finished preprocessing labels\n"; } /** * @brief Revert label preprocessing effect, if needed. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in] labels_map: map of old to new label values used during preprocessing. * @param[in] verbose: debugging flag. */ void postprocess_labels(int n_rows, std::vector<int> & labels, std::map<int, int> & labels_map, bool verbose) { if (verbose) std::cout << "Postrocessing labels\n"; std::map<int, int>::iterator it; int n_unique_cnt = labels_map.size(); std::vector<int> reverse_map; reverse_map.resize(n_unique_cnt); for (auto it = labels_map.begin(); it != labels_map.end(); it++) { reverse_map[it->second] = it->first; } for (int i = 0; i < n_rows; i++) { if (verbose) std::cout << "Mapping " << labels[i] << " back to " << reverse_map[labels[i]] << std::endl; labels[i] = reverse_map[labels[i]]; } if (verbose) std::cout << "Finished postrocessing labels\n"; } /** * @brief Random forest hyper-parameter object constructor to set n_trees member. */ RF_params::RF_params(int cfg_n_trees):n_trees(cfg_n_trees) {} /** * @brief Random forest hyper-parameter object constructor to set bootstrap, bootstrap_features, n_trees and rows_sample members. */ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample):bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Random forest hyper-parameter object constructor to set all RF_params members. 
*/ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample, DecisionTree::DecisionTreeParams cfg_tree_params):bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample), tree_params(cfg_tree_params) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Check validity of all random forest hyper-parameters. */ void RF_params::validity_check() const { ASSERT((n_trees > 0), "Invalid n_trees %d", n_trees); ASSERT((rows_sample > 0) && (rows_sample <= 1.0), "rows_sample value %f outside permitted (0, 1] range", rows_sample); tree_params.validity_check(); } /** * @brief Print all random forest hyper-parameters. */ void RF_params::print() const { std::cout << "bootstrap: " << bootstrap << std::endl; std::cout << "bootstrap features: " << bootstrap_features << std::endl; std::cout << "n_trees: " << n_trees << std::endl; std::cout << "rows_sample: " << rows_sample << std::endl; tree_params.print(); } /** * @brief Construct rf (random forest) object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. * @param[in] cfg_rf_type: Random forest type. Only CLASSIFICATION is currently supported. */ template<typename T> rf<T>::rf(RF_params cfg_rf_params, int cfg_rf_type):rf_params(cfg_rf_params), rf_type(cfg_rf_type), trees(nullptr) { rf_params.validity_check(); } /** * @brief Destructor for random forest object. * @tparam T: data type for input data (float or double). */ template<typename T> rf<T>::~rf() { delete [] trees; } /** * @brief Return number of trees in the forest. * @tparam T: data type for input data (float or double). */ template<typename T> int rf<T>::get_ntrees() { return rf_params.n_trees; } /** * @brief Print summary for all trees in the random forest. * @tparam T: data type for input data (float or double). */ template<typename T> void rf<T>::print_rf_summary() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print_tree_summary(); } } } /** * @brief Print detailed view of all trees in the random forest. * @tparam T: data type for input data (float or double). */ template<typename T> void rf<T>::print_rf_detailed() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print(); } } } /** * @brief Construct rfClassifier object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. */ template <typename T> rfClassifier<T>::rfClassifier(RF_params cfg_rf_params): rf<T>::rf(cfg_rf_params, RF_type::CLASSIFICATION) {}; /** * @brief Build (i.e., fit, train) random forest classifier for input data. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. 
* @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ template <typename T> void rfClassifier<T>::fit(const cumlHandle& user_handle, T * input, int n_rows, int n_cols, int * labels, int n_unique_labels) { ASSERT(!this->trees, "Cannot fit an existing forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); rfClassifier::trees = new DecisionTree::DecisionTreeClassifier<T>[this->rf_params.n_trees]; int n_sampled_rows = this->rf_params.rows_sample * n_rows; const cumlHandle_impl& handle = user_handle.getImpl(); hipStream_t stream = user_handle.getStream(); for (int i = 0; i < this->rf_params.n_trees; i++) { // Select n_sampled_rows (with replacement) numbers from [0, n_rows) per tree. // selected_rows: randomly generated IDs for bootstrapped samples (w/ replacement); a device ptr. MLCommon::device_buffer<unsigned int> selected_rows(handle.getDeviceAllocator(), stream, n_sampled_rows); if (this->rf_params.bootstrap) { MLCommon::Random::Rng r(i * 1000); // Ensure the seed for each tree is different and meaningful. r.uniformInt(selected_rows.data(), n_sampled_rows, (unsigned int) 0, (unsigned int) n_rows, stream); } else { std::vector<unsigned int> h_selected_rows(n_rows); std::iota(h_selected_rows.begin(), h_selected_rows.end(), 0); std::random_shuffle(h_selected_rows.begin(), h_selected_rows.end()); h_selected_rows.resize(n_sampled_rows); MLCommon::updateDevice(selected_rows.data(), h_selected_rows.data(), n_sampled_rows, stream); } /* Build individual tree in the forest. - input is a pointer to orig data that have n_cols features and n_rows rows. - n_sampled_rows: # rows sampled for tree's bootstrap sample. - selected_rows: points to a list of row #s (w/ n_sampled_rows elements) used to build the bootstrapped sample. Expectation: Each tree node will contain (a) # n_sampled_rows and (b) a pointer to a list of row numbers w.r.t original data. */ this->trees[i].fit(user_handle, input, n_cols, n_rows, labels, selected_rows.data(), n_sampled_rows, n_unique_labels, this->rf_params.tree_params); //Cleanup selected_rows.release(stream); } } /** * @brief Predict target feature for input data; n-ary classification for single feature supported. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ template<typename T> void rfClassifier<T>::predict(const cumlHandle& user_handle, const T * input, int n_rows, int n_cols, int * predictions, bool verbose) const { ASSERT(this->trees, "Cannot predict! No trees in the forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); ASSERT(predictions != nullptr, "Error! 
User has not allocated memory for predictions."); int row_size = n_cols; for (int row_id = 0; row_id < n_rows; row_id++) { if (verbose) { std::cout << "\n\n"; std::cout << "Predict for sample: "; for (int i = 0; i < n_cols; i++) std::cout << input[row_id*row_size + i] << ", "; std::cout << std::endl; } std::map<int, int> prediction_to_cnt; std::pair<std::map<int, int>::iterator, bool> ret; int max_cnt_so_far = 0; int majority_prediction = -1; for (int i = 0; i < this->rf_params.n_trees; i++) { //Return prediction for one sample. if (verbose) { std::cout << "Printing tree " << i << std::endl; this->trees[i].print(); } int prediction; this->trees[i].predict(user_handle, &input[row_id * row_size], 1, n_cols, &prediction, verbose); ret = prediction_to_cnt.insert(std::pair<int, int>(prediction, 1)); if (!(ret.second)) { ret.first->second += 1; } if (max_cnt_so_far < ret.first->second) { max_cnt_so_far = ret.first->second; majority_prediction = ret.first->first; } } predictions[row_id] = majority_prediction; } } /** * @brief Predict target feature for input data and validate against ref_labels. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ template<typename T> RF_metrics rfClassifier<T>::cross_validate(const cumlHandle& user_handle, const T * input, const int * ref_labels, int n_rows, int n_cols, int * predictions, bool verbose) const { predict(user_handle, input, n_rows, n_cols, predictions, verbose); unsigned long long correctly_predicted = 0ULL; for (int i = 0; i < n_rows; i++) { correctly_predicted += (predictions[i] == ref_labels[i]); } float accuracy = correctly_predicted * 1.0f/n_rows; RF_metrics stats(accuracy); if (verbose) stats.print(); /* TODO: Potentially augment RF_metrics w/ more metrics (e.g., precision, F1, etc.). For non binary classification problems (i.e., one target and > 2 labels), need avg for each of these metrics */ return stats; } template class rf<float>; template class rf<double>; template class rfClassifier<float>; template class rfClassifier<double>; // Stateless API functions: fit, predict and cross_validate /** * @brief Build (i.e., fit, train) random forest classifier for input data of type float. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. 
in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<float> * rf_classifier, float * input, int n_rows, int n_cols, int * labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Build (i.e., fit, train) random forest classifier for input data of type double. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<double> * rf_classifier, double * input, int n_rows, int n_cols, int * labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Predict target feature for input data of type float; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<float> * rf_classifier, const float * input, int n_rows, int n_cols, int * predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<double> * rf_classifier, const double * input, int n_rows, int n_cols, int * predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type float and validate against ref_labels. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. 
* @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics cross_validate(const cumlHandle& user_handle, const rfClassifier<float> * rf_classifier, const float * input, const int * ref_labels, int n_rows, int n_cols, int * predictions, bool verbose) { return rf_classifier->cross_validate(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double and validate against ref_labels. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics cross_validate(const cumlHandle& user_handle, const rfClassifier<double> * rf_classifier, const double * input, const int * ref_labels, int n_rows, int n_cols, int * predictions, bool verbose) { return rf_classifier->cross_validate(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } }; // end namespace ML
d528ef4f61f9b55a4e7de4b84e46479041cc3a39.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "randomforest.h" namespace ML { /** * @brief Construct RF_metrics. * @param[in] cfg_accuracy: accuracy. */ RF_metrics::RF_metrics(float cfg_accuracy) : accuracy(cfg_accuracy) {}; /** * @brief Print accuracy metric. */ void RF_metrics::print() { std::cout << "Accuracy: " << accuracy << std::endl; } /** * @brief Update labels so they are unique from 0 to n_unique_labels values. Create/update an old label to new label map per random forest. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in,out] labels_map: map of old label values to new ones. * @param[in] verbose: debugging flag. */ void preprocess_labels(int n_rows, std::vector<int> & labels, std::map<int, int> & labels_map, bool verbose) { std::pair<std::map<int, int>::iterator, bool> ret; int n_unique_labels = 0; if (verbose) std::cout << "Preprocessing labels\n"; for (int i = 0; i < n_rows; i++) { ret = labels_map.insert(std::pair<int, int>(labels[i], n_unique_labels)); if (ret.second) { n_unique_labels += 1; } if (verbose) std::cout << "Mapping " << labels[i] << " to "; labels[i] = ret.first->second; //Update labels **IN-PLACE** if (verbose) std::cout << labels[i] << std::endl; } if (verbose) std::cout << "Finished preprocessing labels\n"; } /** * @brief Revert label preprocessing effect, if needed. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in] labels_map: map of old to new label values used during preprocessing. * @param[in] verbose: debugging flag. */ void postprocess_labels(int n_rows, std::vector<int> & labels, std::map<int, int> & labels_map, bool verbose) { if (verbose) std::cout << "Postrocessing labels\n"; std::map<int, int>::iterator it; int n_unique_cnt = labels_map.size(); std::vector<int> reverse_map; reverse_map.resize(n_unique_cnt); for (auto it = labels_map.begin(); it != labels_map.end(); it++) { reverse_map[it->second] = it->first; } for (int i = 0; i < n_rows; i++) { if (verbose) std::cout << "Mapping " << labels[i] << " back to " << reverse_map[labels[i]] << std::endl; labels[i] = reverse_map[labels[i]]; } if (verbose) std::cout << "Finished postrocessing labels\n"; } /** * @brief Random forest hyper-parameter object constructor to set n_trees member. */ RF_params::RF_params(int cfg_n_trees):n_trees(cfg_n_trees) {} /** * @brief Random forest hyper-parameter object constructor to set bootstrap, bootstrap_features, n_trees and rows_sample members. */ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample):bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Random forest hyper-parameter object constructor to set all RF_params members. 
*/ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample, DecisionTree::DecisionTreeParams cfg_tree_params):bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample), tree_params(cfg_tree_params) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Check validity of all random forest hyper-parameters. */ void RF_params::validity_check() const { ASSERT((n_trees > 0), "Invalid n_trees %d", n_trees); ASSERT((rows_sample > 0) && (rows_sample <= 1.0), "rows_sample value %f outside permitted (0, 1] range", rows_sample); tree_params.validity_check(); } /** * @brief Print all random forest hyper-parameters. */ void RF_params::print() const { std::cout << "bootstrap: " << bootstrap << std::endl; std::cout << "bootstrap features: " << bootstrap_features << std::endl; std::cout << "n_trees: " << n_trees << std::endl; std::cout << "rows_sample: " << rows_sample << std::endl; tree_params.print(); } /** * @brief Construct rf (random forest) object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. * @param[in] cfg_rf_type: Random forest type. Only CLASSIFICATION is currently supported. */ template<typename T> rf<T>::rf(RF_params cfg_rf_params, int cfg_rf_type):rf_params(cfg_rf_params), rf_type(cfg_rf_type), trees(nullptr) { rf_params.validity_check(); } /** * @brief Destructor for random forest object. * @tparam T: data type for input data (float or double). */ template<typename T> rf<T>::~rf() { delete [] trees; } /** * @brief Return number of trees in the forest. * @tparam T: data type for input data (float or double). */ template<typename T> int rf<T>::get_ntrees() { return rf_params.n_trees; } /** * @brief Print summary for all trees in the random forest. * @tparam T: data type for input data (float or double). */ template<typename T> void rf<T>::print_rf_summary() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print_tree_summary(); } } } /** * @brief Print detailed view of all trees in the random forest. * @tparam T: data type for input data (float or double). */ template<typename T> void rf<T>::print_rf_detailed() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print(); } } } /** * @brief Construct rfClassifier object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. */ template <typename T> rfClassifier<T>::rfClassifier(RF_params cfg_rf_params): rf<T>::rf(cfg_rf_params, RF_type::CLASSIFICATION) {}; /** * @brief Build (i.e., fit, train) random forest classifier for input data. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. 
* @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ template <typename T> void rfClassifier<T>::fit(const cumlHandle& user_handle, T * input, int n_rows, int n_cols, int * labels, int n_unique_labels) { ASSERT(!this->trees, "Cannot fit an existing forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); rfClassifier::trees = new DecisionTree::DecisionTreeClassifier<T>[this->rf_params.n_trees]; int n_sampled_rows = this->rf_params.rows_sample * n_rows; const cumlHandle_impl& handle = user_handle.getImpl(); cudaStream_t stream = user_handle.getStream(); for (int i = 0; i < this->rf_params.n_trees; i++) { // Select n_sampled_rows (with replacement) numbers from [0, n_rows) per tree. // selected_rows: randomly generated IDs for bootstrapped samples (w/ replacement); a device ptr. MLCommon::device_buffer<unsigned int> selected_rows(handle.getDeviceAllocator(), stream, n_sampled_rows); if (this->rf_params.bootstrap) { MLCommon::Random::Rng r(i * 1000); // Ensure the seed for each tree is different and meaningful. r.uniformInt(selected_rows.data(), n_sampled_rows, (unsigned int) 0, (unsigned int) n_rows, stream); } else { std::vector<unsigned int> h_selected_rows(n_rows); std::iota(h_selected_rows.begin(), h_selected_rows.end(), 0); std::random_shuffle(h_selected_rows.begin(), h_selected_rows.end()); h_selected_rows.resize(n_sampled_rows); MLCommon::updateDevice(selected_rows.data(), h_selected_rows.data(), n_sampled_rows, stream); } /* Build individual tree in the forest. - input is a pointer to orig data that have n_cols features and n_rows rows. - n_sampled_rows: # rows sampled for tree's bootstrap sample. - selected_rows: points to a list of row #s (w/ n_sampled_rows elements) used to build the bootstrapped sample. Expectation: Each tree node will contain (a) # n_sampled_rows and (b) a pointer to a list of row numbers w.r.t original data. */ this->trees[i].fit(user_handle, input, n_cols, n_rows, labels, selected_rows.data(), n_sampled_rows, n_unique_labels, this->rf_params.tree_params); //Cleanup selected_rows.release(stream); } } /** * @brief Predict target feature for input data; n-ary classification for single feature supported. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ template<typename T> void rfClassifier<T>::predict(const cumlHandle& user_handle, const T * input, int n_rows, int n_cols, int * predictions, bool verbose) const { ASSERT(this->trees, "Cannot predict! No trees in the forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); ASSERT(predictions != nullptr, "Error! 
User has not allocated memory for predictions."); int row_size = n_cols; for (int row_id = 0; row_id < n_rows; row_id++) { if (verbose) { std::cout << "\n\n"; std::cout << "Predict for sample: "; for (int i = 0; i < n_cols; i++) std::cout << input[row_id*row_size + i] << ", "; std::cout << std::endl; } std::map<int, int> prediction_to_cnt; std::pair<std::map<int, int>::iterator, bool> ret; int max_cnt_so_far = 0; int majority_prediction = -1; for (int i = 0; i < this->rf_params.n_trees; i++) { //Return prediction for one sample. if (verbose) { std::cout << "Printing tree " << i << std::endl; this->trees[i].print(); } int prediction; this->trees[i].predict(user_handle, &input[row_id * row_size], 1, n_cols, &prediction, verbose); ret = prediction_to_cnt.insert(std::pair<int, int>(prediction, 1)); if (!(ret.second)) { ret.first->second += 1; } if (max_cnt_so_far < ret.first->second) { max_cnt_so_far = ret.first->second; majority_prediction = ret.first->first; } } predictions[row_id] = majority_prediction; } } /** * @brief Predict target feature for input data and validate against ref_labels. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ template<typename T> RF_metrics rfClassifier<T>::cross_validate(const cumlHandle& user_handle, const T * input, const int * ref_labels, int n_rows, int n_cols, int * predictions, bool verbose) const { predict(user_handle, input, n_rows, n_cols, predictions, verbose); unsigned long long correctly_predicted = 0ULL; for (int i = 0; i < n_rows; i++) { correctly_predicted += (predictions[i] == ref_labels[i]); } float accuracy = correctly_predicted * 1.0f/n_rows; RF_metrics stats(accuracy); if (verbose) stats.print(); /* TODO: Potentially augment RF_metrics w/ more metrics (e.g., precision, F1, etc.). For non binary classification problems (i.e., one target and > 2 labels), need avg for each of these metrics */ return stats; } template class rf<float>; template class rf<double>; template class rfClassifier<float>; template class rfClassifier<double>; // Stateless API functions: fit, predict and cross_validate /** * @brief Build (i.e., fit, train) random forest classifier for input data of type float. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. 
in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<float> * rf_classifier, float * input, int n_rows, int n_cols, int * labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Build (i.e., fit, train) random forest classifier for input data of type double. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<double> * rf_classifier, double * input, int n_rows, int n_cols, int * labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Predict target feature for input data of type float; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<float> * rf_classifier, const float * input, int n_rows, int n_cols, int * predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<double> * rf_classifier, const double * input, int n_rows, int n_cols, int * predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type float and validate against ref_labels. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. 
* @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics cross_validate(const cumlHandle& user_handle, const rfClassifier<float> * rf_classifier, const float * input, const int * ref_labels, int n_rows, int n_cols, int * predictions, bool verbose) { return rf_classifier->cross_validate(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double and validate against ref_labels. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics cross_validate(const cumlHandle& user_handle, const rfClassifier<double> * rf_classifier, const double * input, const int * ref_labels, int n_rows, int n_cols, int * predictions, bool verbose) { return rf_classifier->cross_validate(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } }; // end namespace ML
075ece331573d859eeb5165a5a14536e11cf9125.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <chrono> #include "dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { auto state = at::globalContext().getTHCState(); int old_device = -1; THCudaCheck(hipGetDevice(&old_device)); if(old_device==input.device().index()){ old_device = -1; }else{ THCudaCheck(hipSetDevice(input.device().index())); } using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::zeros({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::zeros({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; // at::native::baddbmm__cuda(output, ones, bias, 1, 0); THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; // at::native::baddbmm__cuda(output, columns, weight, 1, 1); THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); if(old_device>=0){ THCudaCheck(hipSetDevice(old_device)); } return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { auto state = at::globalContext().getTHCState(); THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a 
CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. 
bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemv(state, 't', k_, m_, 1.0f, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
075ece331573d859eeb5165a5a14536e11cf9125.cu
#include <iostream> #include <vector> #include <chrono> #include "dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { auto state = at::globalContext().getTHCState(); int old_device = -1; THCudaCheck(cudaGetDevice(&old_device)); if(old_device==input.device().index()){ old_device = -1; }else{ THCudaCheck(cudaSetDevice(input.device().index())); } using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::zeros({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::zeros({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; // at::native::baddbmm__cuda(output, ones, bias, 1, 0); THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; // at::native::baddbmm__cuda(output, columns, weight, 1, 1); THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); if(old_device>=0){ THCudaCheck(cudaSetDevice(old_device)); } return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { auto state = at::globalContext().getTHCState(); THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); 
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(THCState_getCurrentStream(state), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. 
bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemv(state, 't', k_, m_, 1.0f, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
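The forward entry point above takes raw ATen tensors, so its calling convention is easiest to see from a small host-side sketch. The following is illustrative only: the prototype is copied from the definition above, while the shapes assume the conventional DCNv2 layout (2 offset channels and 1 mask channel per sampling location per deformable group); names and sizes are not taken from the original source.

// Illustrative caller for dcn_v2_cuda_forward (assumed shapes, not from the original file).
#include <ATen/ATen.h>

at::Tensor dcn_v2_cuda_forward(const at::Tensor&, const at::Tensor&, const at::Tensor&,
                               const at::Tensor&, const at::Tensor&,
                               int, int, int, int, int, int, int, int, int);

void example_dcn_forward()
{
    const int B = 2, C_in = 64, C_out = 64, H = 32, W = 32;
    const int kh = 3, kw = 3, dg = 1;   // kernel size and deformable groups
    auto opts   = at::TensorOptions().device(at::kCUDA).dtype(at::kFloat);
    auto input  = at::randn({B, C_in, H, W}, opts);
    auto weight = at::randn({C_out, C_in, kh, kw}, opts);
    auto bias   = at::zeros({C_out}, opts);
    // assumed DCNv2 layout: (x, y) offset pair and one mask value per sampling point
    auto offset = at::zeros({B, 2 * dg * kh * kw, H, W}, opts);
    auto mask   = at::ones({B, dg * kh * kw, H, W}, opts);
    // with stride 1, pad 1, dilation 1 the output keeps the input spatial size
    auto out = dcn_v2_cuda_forward(input, weight, bias, offset, mask,
                                   kh, kw, 1, 1, 1, 1, 1, 1, dg);
    (void)out;  // {B, C_out, H, W}
}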
b1a3b79e7a9fc83f3814ac234df8f5e1d2402beb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C"
__global__ void compute_probs(float* alphas, float* rands, float* probs, int n, int K, int M)
{
    // assign overall id/index of the thread = id of row
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int threads_per_block = blockDim.x;

    // set up shared memory: half for probs and half for w
    extern __shared__ float shared[];
    float* probs_shared = shared;
    float* w = &shared[K*threads_per_block]; // shared mem is one big block, so need to index into latter portion of it to use for w

    if(i < n) {
        float maxval;
        int m, k;
        int maxind;
        float M_d = (float) M;

        // initialize shared memory probs
        for(k = 0; k < K; ++k) {
            probs_shared[k*threads_per_block + threadIdx.x] = 0.0;
        }

        // core computations
        for(m = 0; m < M; ++m){ // loop over Monte Carlo iterations
            for(k = 0; k < K; ++k){
                // generate W ~ N(alpha, 1)
                w[k*threads_per_block + threadIdx.x] = alphas[k*n + i] + rands[k*M + m];
            }
            maxind = K-1;
            maxval = w[(K-1)*threads_per_block + threadIdx.x];
            for(k = 0; k < (K-1); ++k){
                if(w[k*threads_per_block + threadIdx.x] > maxval){
                    maxind = k;
                    maxval = w[k*threads_per_block + threadIdx.x];
                }
            }
            probs_shared[maxind*threads_per_block + threadIdx.x] += 1.0;
        }

        for(k = 0; k < K; ++k) {
            probs_shared[k*threads_per_block + threadIdx.x] /= M_d;
        }

        // copy to device memory so can be returned to CPU
        for(k = 0; k < K; ++k) {
            probs[k*n + i] = probs_shared[k*threads_per_block + threadIdx.x];
        }
    }
}
b1a3b79e7a9fc83f3814ac234df8f5e1d2402beb.cu
extern "C" __global__ void compute_probs(float* alphas, float* rands, float* probs, int n, int K, int M) { // assign overall id/index of the thread = id of row int i = blockIdx.x * blockDim.x + threadIdx.x; int threads_per_block = blockDim.x; // set up shared memory: half for probs and half for w extern __shared__ float shared[]; float* probs_shared = shared; float* w = &shared[K*threads_per_block]; // shared mem is one big block, so need to index into latter portion of it to use for w if(i < n) { float maxval; int m, k; int maxind; float M_d = (float) M; // initialize shared memory probs for(k = 0; k < K; ++k) { probs_shared[k*threads_per_block + threadIdx.x] = 0.0; } // core computations for(m = 0; m < M; ++m){ // loop over Monte Carlo iterations for(k = 0; k < K; ++k){ // generate W ~ N(alpha, 1) w[k*threads_per_block + threadIdx.x] = alphas[k*n + i] + rands[k*M + m]; } maxind = K-1; maxval = w[(K-1)*threads_per_block + threadIdx.x]; for(k = 0; k < (K-1); ++k){ if(w[k*threads_per_block + threadIdx.x] > maxval){ maxind = k; maxval = w[k*threads_per_block + threadIdx.x]; } } probs_shared[maxind*threads_per_block + threadIdx.x] += 1.0; } for(k = 0; k < K; ++k) { probs_shared[k*threads_per_block + threadIdx.x] /= M_d; } // copy to device memory so can be returned to CPU for(k = 0; k < K; ++k) { probs[k*n + i] = probs_shared[k*threads_per_block + threadIdx.x]; } } }
925c88c114c986c31804ef4524d53f9756aea662.hip
// !!! This is a file automatically generated by hipify!!!
/*
** Hello World using CUDA
**
** The string "Hello World!" is mangled then restored using a common CUDA idiom
**
** Byron Galbraith
** 2009-02-18
*/
#include <hip/hip_runtime.h>
#include <stdio.h>

// Prototypes
__global__ void helloWorld(char*);
void devicenfo(void);

// Host function
int main(int argc, char** argv)
{
  int i;

  //Prints out device info
  devicenfo();

  // desired output
  char str[] = "Hello World!";

  // mangle contents of output
  // the null character is left intact for simplicity
  for(i = 0; i < 12; i++)
    str[i] -= i;

  // allocate memory on the device
  char *d_str;
  size_t size = sizeof(str);
  hipMalloc((void**)&d_str, size);

  // copy the string to the device
  hipMemcpy(d_str, str, size, hipMemcpyHostToDevice);

  // set the grid and block sizes
  dim3 dimGrid(2);  // one block per word
  dim3 dimBlock(6); // one thread per character

  // invoke the kernel
  hipLaunchKernelGGL(( helloWorld), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_str);

  // retrieve the results from the device
  hipMemcpy(str, d_str, size, hipMemcpyDeviceToHost);

  // free up the allocated memory on the device
  hipFree(d_str);

  // everyone's favorite part
  printf("%s\n", str);

  return 0;
}

// Device kernel
__global__ void helloWorld(char* str)
{
  // determine where in the thread grid we are
  int idx = blockIdx.x * blockDim.x + threadIdx.x;

  // unmangle output
  str[idx] += idx;
}

// Device info
void devicenfo(void)
{
  struct hipDeviceProp_t capabilities;

  hipGetDeviceProperties (&capabilities, 0);

  printf("->CUDA Platform & Capabilities\n");
  printf("Name: %s\n", capabilities.name);
  printf("totalGlobalMem: %.2f MB\n", capabilities.totalGlobalMem/1024.0f/1024.0f);
  printf("sharedMemPerBlock: %.2f KB\n", capabilities.sharedMemPerBlock/1024.0f);
  printf("regsPerBlock (32 bits): %d\n", capabilities.regsPerBlock);
  printf("warpSize: %d\n", capabilities.warpSize);
  printf("memPitch: %.2f KB\n", capabilities.memPitch/1024.0f);
  printf("maxThreadsPerBlock: %d\n", capabilities.maxThreadsPerBlock);
  printf("maxThreadsDim: %d x %d x %d\n", capabilities.maxThreadsDim[0], capabilities.maxThreadsDim[1], capabilities.maxThreadsDim[2]);
  printf("maxGridSize: %d x %d\n", capabilities.maxGridSize[0], capabilities.maxGridSize[1]);
  printf("totalConstMem: %.2f KB\n", capabilities.totalConstMem/1024.0f);
  printf("major.minor: %d.%d\n", capabilities.major, capabilities.minor);
  printf("clockRate: %.2f MHz\n", capabilities.clockRate/1000.0f); // clockRate is reported in kHz
  printf("textureAlignment: %zu\n", capabilities.textureAlignment); // textureAlignment is a size_t
  printf("deviceOverlap: %d\n", capabilities.deviceOverlap);
  printf("multiProcessorCount: %d\n", capabilities.multiProcessorCount);
}
925c88c114c986c31804ef4524d53f9756aea662.cu
/*
** Hello World using CUDA
**
** The string "Hello World!" is mangled then restored using a common CUDA idiom
**
** Byron Galbraith
** 2009-02-18
*/
#include <cuda.h>
#include <stdio.h>

// Prototypes
__global__ void helloWorld(char*);
void devicenfo(void);

// Host function
int main(int argc, char** argv)
{
  int i;

  //Prints out device info
  devicenfo();

  // desired output
  char str[] = "Hello World!";

  // mangle contents of output
  // the null character is left intact for simplicity
  for(i = 0; i < 12; i++)
    str[i] -= i;

  // allocate memory on the device
  char *d_str;
  size_t size = sizeof(str);
  cudaMalloc((void**)&d_str, size);

  // copy the string to the device
  cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice);

  // set the grid and block sizes
  dim3 dimGrid(2);  // one block per word
  dim3 dimBlock(6); // one thread per character

  // invoke the kernel
  helloWorld<<< dimGrid, dimBlock >>>(d_str);

  // retrieve the results from the device
  cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost);

  // free up the allocated memory on the device
  cudaFree(d_str);

  // everyone's favorite part
  printf("%s\n", str);

  return 0;
}

// Device kernel
__global__ void helloWorld(char* str)
{
  // determine where in the thread grid we are
  int idx = blockIdx.x * blockDim.x + threadIdx.x;

  // unmangle output
  str[idx] += idx;
}

// Device info
void devicenfo(void)
{
  struct cudaDeviceProp capabilities;

  cudaGetDeviceProperties (&capabilities, 0);

  printf("->CUDA Platform & Capabilities\n");
  printf("Name: %s\n", capabilities.name);
  printf("totalGlobalMem: %.2f MB\n", capabilities.totalGlobalMem/1024.0f/1024.0f);
  printf("sharedMemPerBlock: %.2f KB\n", capabilities.sharedMemPerBlock/1024.0f);
  printf("regsPerBlock (32 bits): %d\n", capabilities.regsPerBlock);
  printf("warpSize: %d\n", capabilities.warpSize);
  printf("memPitch: %.2f KB\n", capabilities.memPitch/1024.0f);
  printf("maxThreadsPerBlock: %d\n", capabilities.maxThreadsPerBlock);
  printf("maxThreadsDim: %d x %d x %d\n", capabilities.maxThreadsDim[0], capabilities.maxThreadsDim[1], capabilities.maxThreadsDim[2]);
  printf("maxGridSize: %d x %d\n", capabilities.maxGridSize[0], capabilities.maxGridSize[1]);
  printf("totalConstMem: %.2f KB\n", capabilities.totalConstMem/1024.0f);
  printf("major.minor: %d.%d\n", capabilities.major, capabilities.minor);
  printf("clockRate: %.2f MHz\n", capabilities.clockRate/1000.0f); // clockRate is reported in kHz
  printf("textureAlignment: %zu\n", capabilities.textureAlignment); // textureAlignment is a size_t
  printf("deviceOverlap: %d\n", capabilities.deviceOverlap);
  printf("multiProcessorCount: %d\n", capabilities.multiProcessorCount);
}
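Neither version of the sample checks whether the launch or the copies succeeded, so on a machine without a usable device it would quietly print the still-mangled string. A small check that could sit after the kernel launch is sketched below; the helper name is made up, but the calls are the standard CUDA runtime error functions.

// Possible launch check (illustrative; not part of the original sample).
#include <cuda_runtime.h>
#include <stdio.h>

static void checkLastLaunch(const char* what)
{
  cudaError_t err = cudaGetLastError();      // errors detected at launch time
  if (err == cudaSuccess)
    err = cudaDeviceSynchronize();           // errors raised while the kernel ran
  if (err != cudaSuccess)
    fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
}
// usage: helloWorld<<<dimGrid, dimBlock>>>(d_str); checkLastLaunch("helloWorld");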
64f92844777286979a63ace88dc0527034e2a7e0.hip
// !!! This is a file automatically generated by hipify!!! #include "virtual_memory.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <device_launch_parameters.h> #include<math.h> #include<math_functions.h> #include<time.h> // this cuda is copied from the internet struct Lock { int *mutex; __device__ Lock(void) { #if __CUDA_ARCH__ >= 200 mutex = new int; (*mutex) = 0; #endif } __device__ ~Lock(void) { #if __CUDA_ARCH__ >= 200 delete mutex; #endif } __device__ void lock(void) { #if __CUDA_ARCH__ >= 200 while (atomicCAS(mutex, 0, 1) != 0); #endif } __device__ void unlock(void) { #if __CUDA_ARCH__ >= 200 atomicExch(mutex, 0); #endif } }; __device__ void init_invert_page_table(VirtualMemory *vm) { for (int i = 0; i < vm->PAGE_ENTRIES; i++) { vm->invert_page_table[i] = 0x80000000; // invalid := MSB is 1 vm->invert_page_table[i + vm->PAGE_ENTRIES] = 0x80000000; //linked_list_prev vm->invert_page_table[i + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; //linked_list_next } vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = 0x80000000; //head vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = 0x80000000; //tail } __device__ void vm_init(VirtualMemory *vm, uchar *buffer, uchar *storage, u32 *invert_page_table, int *pagefault_num_ptr, int PAGESIZE, int INVERT_PAGE_TABLE_SIZE, int PHYSICAL_MEM_SIZE, int STORAGE_SIZE, int PAGE_ENTRIES) { // init variables vm->buffer = buffer; vm->storage = storage; vm->invert_page_table = invert_page_table; vm->pagefault_num_ptr = pagefault_num_ptr; // init constants vm->PAGESIZE = PAGESIZE; vm->INVERT_PAGE_TABLE_SIZE = INVERT_PAGE_TABLE_SIZE; vm->PHYSICAL_MEM_SIZE = PHYSICAL_MEM_SIZE; vm->STORAGE_SIZE = STORAGE_SIZE; vm->PAGE_ENTRIES = PAGE_ENTRIES; // before first vm_write or vm_read init_invert_page_table(vm); } __device__ uchar vm_read(VirtualMemory *vm, u32 addr) { Lock myLock; int index = threadIdx.x; myLock.lock(); /* Complate vm_read function to read single element from data buffer */ uchar value; uint32_t page_num = addr / 32; uint32_t offset = addr % 32; uint32_t physical_addr; int next_index; int prev_index; int tail_index; int head_index; int flag_in = -1; //mark the position that page number can be found int flag_ava = -1; //mark the first available page number position for (int i = 0; i < vm->PAGE_ENTRIES; i++) { if (vm->invert_page_table[i] == page_num) { //we find the page number in the page table flag_in = i; break; } } if (flag_in != -1) { physical_addr = (flag_in) * 32 + offset; value = vm->buffer[physical_addr]; head_index = vm->invert_page_table[3 * (vm->PAGE_ENTRIES)]; if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == flag_in) { //linked list next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = 0x80000000; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = next_index; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; vm->invert_page_table[flag_in + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; } else if (vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] == flag_in) { } else { //page prev_index = vm->invert_page_table[flag_in + vm->PAGE_ENTRIES]; next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[prev_index + 2 * (vm->PAGE_ENTRIES)] = next_index; vm->invert_page_table[next_index + 
vm->PAGE_ENTRIES] = prev_index; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //tail indexpivot vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; //pivotinvalid vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[flag_in + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; //tail flag_in } } else { //page is not find in the page table *vm->pagefault_num_ptr += 1; head_index = vm->invert_page_table[3 * (vm->PAGE_ENTRIES)]; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; // uint32_t secondary_addr = 32 * vm->invert_page_table[head_index]; for (int n = 0; n < 32; n++) { vm->storage[secondary_addr + n] = vm->buffer[32 * head_index + n]; } vm->invert_page_table[head_index] = page_num; next_index = vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = 0x80000000; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = next_index; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = head_index; vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = head_index; for (int l = 0; l < 32; l++) { vm->buffer[32 * head_index + l] = vm->storage[32 * vm->invert_page_table[head_index] + l]; } value = vm->buffer[32 * head_index + offset]; } return value; //TODO myLock.unlock(); } __device__ void vm_write(VirtualMemory *vm, u32 addr, uchar value) { Lock myLock; int index = threadIdx.x; myLock.lock(); /* Complete vm_write function to write value into data buffer */ uint32_t page_num = addr / 32; uint32_t offset = addr % 32; uint32_t physical_addr; int flag_in = -1; //mark the position that page number can be found int flag_ava = -1; //mark the first available page number position uint32_t next_index; uint32_t prev_index; uint32_t tail_index; uint32_t head_index; for (int i = 0; i < vm->PAGE_ENTRIES; i++) { if (vm->invert_page_table[i] == page_num) { //we find the page number in the page table flag_in = i; break; } } if (flag_in != -1) { //page is in the page table physical_addr = (flag_in) * 32 + offset; vm->buffer[physical_addr] = value; // vm->invert_page_table[flag_in + 2*(vm->PAGE_ENTRIES)] = 0; if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == flag_in && vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] == flag_in) { //page entry } else if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == flag_in) { //page entry // next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = next_index; //index vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = 0x80000000; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //tail indexpivot vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; //pivotinvalid vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; //tail flag_in } else if (vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] == flag_in) { } else { //page prev_index = vm->invert_page_table[flag_in + vm->PAGE_ENTRIES]; next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[prev_index + 2 * (vm->PAGE_ENTRIES)] = next_index; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = prev_index; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //tail indexpivot vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; 
//pivotinvalid vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; //tail flag_in } } else { //page is not in the page table *vm->pagefault_num_ptr += 1; if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == 0x80000000) { //pagetable[0] vm->invert_page_table[0] = page_num; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = uint32_t(0); //head vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = uint32_t(0); //tail physical_addr = 0 * 32 + offset; vm->buffer[physical_addr] = value; } else { for (int j = 0; j < vm->PAGE_ENTRIES; j++) { if (vm->invert_page_table[j] == 0x80000000) { flag_ava = j; //we find the first empty entry at pos j break; } } if (flag_ava != -1) { vm->invert_page_table[flag_ava] = page_num; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_ava; vm->invert_page_table[flag_ava + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[flag_ava + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = uint32_t(flag_ava); physical_addr = (flag_ava) * 32 + offset; vm->buffer[physical_addr] = value; } else { //the page table is full now, we have to replace the LRU head_index = vm->invert_page_table[3 * (vm->PAGE_ENTRIES)]; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; uint32_t secondary_addr = 32 * vm->invert_page_table[head_index]; for (int l = 0; l < 32; l++) { vm->storage[secondary_addr + l] = vm->buffer[32 * head_index + l]; } vm->invert_page_table[head_index] = page_num; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)]; next_index = vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[next_index + (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = head_index; vm->invert_page_table[head_index + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = head_index; vm->buffer[32 * head_index + offset] = value; } } } myLock.unlock(); } __device__ void vm_snapshot(VirtualMemory *vm, uchar *results, int offset, int input_size) { /* Complete snapshot function togther with vm_read to load elements from data * to result buffer */ for (int i = 0; i < input_size; i+=4) { results[i + threadIdx.x] = vm_read(vm, i + threadIdx.x); __syncthreads(); } }
64f92844777286979a63ace88dc0527034e2a7e0.cu
#include "virtual_memory.h" #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <device_launch_parameters.h> #include<math.h> #include<math_functions.h> #include<time.h> // this cuda 原子锁 is copied from the internet struct Lock { int *mutex; __device__ Lock(void) { #if __CUDA_ARCH__ >= 200 mutex = new int; (*mutex) = 0; #endif } __device__ ~Lock(void) { #if __CUDA_ARCH__ >= 200 delete mutex; #endif } __device__ void lock(void) { #if __CUDA_ARCH__ >= 200 while (atomicCAS(mutex, 0, 1) != 0); #endif } __device__ void unlock(void) { #if __CUDA_ARCH__ >= 200 atomicExch(mutex, 0); #endif } }; __device__ void init_invert_page_table(VirtualMemory *vm) { for (int i = 0; i < vm->PAGE_ENTRIES; i++) { vm->invert_page_table[i] = 0x80000000; // invalid := MSB is 1 vm->invert_page_table[i + vm->PAGE_ENTRIES] = 0x80000000; //linked_list_prev vm->invert_page_table[i + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; //linked_list_next } vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = 0x80000000; //head vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = 0x80000000; //tail } __device__ void vm_init(VirtualMemory *vm, uchar *buffer, uchar *storage, u32 *invert_page_table, int *pagefault_num_ptr, int PAGESIZE, int INVERT_PAGE_TABLE_SIZE, int PHYSICAL_MEM_SIZE, int STORAGE_SIZE, int PAGE_ENTRIES) { // init variables vm->buffer = buffer; vm->storage = storage; vm->invert_page_table = invert_page_table; vm->pagefault_num_ptr = pagefault_num_ptr; // init constants vm->PAGESIZE = PAGESIZE; vm->INVERT_PAGE_TABLE_SIZE = INVERT_PAGE_TABLE_SIZE; vm->PHYSICAL_MEM_SIZE = PHYSICAL_MEM_SIZE; vm->STORAGE_SIZE = STORAGE_SIZE; vm->PAGE_ENTRIES = PAGE_ENTRIES; // before first vm_write or vm_read init_invert_page_table(vm); } __device__ uchar vm_read(VirtualMemory *vm, u32 addr) { Lock myLock; int index = threadIdx.x; myLock.lock(); /* Complate vm_read function to read single element from data buffer */ uchar value; uint32_t page_num = addr / 32; uint32_t offset = addr % 32; uint32_t physical_addr; int next_index; int prev_index; int tail_index; int head_index; int flag_in = -1; //mark the position that page number can be found int flag_ava = -1; //mark the first available page number position for (int i = 0; i < vm->PAGE_ENTRIES; i++) { if (vm->invert_page_table[i] == page_num) { //we find the page number in the page table flag_in = i; break; } } if (flag_in != -1) { physical_addr = (flag_in) * 32 + offset; value = vm->buffer[physical_addr]; head_index = vm->invert_page_table[3 * (vm->PAGE_ENTRIES)]; if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == flag_in) { //找到的东西是linked list的头,我们移到尾巴去 next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = 0x80000000; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = next_index; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; vm->invert_page_table[flag_in + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; } else if (vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] == flag_in) { } else { //我们找到的page是在中间 prev_index = vm->invert_page_table[flag_in + vm->PAGE_ENTRIES]; next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[prev_index + 2 * (vm->PAGE_ENTRIES)] = next_index; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = prev_index; tail_index = 
vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //把tail index的下一个指向我们的pivot vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; //把pivot的下一个变成invalid vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[flag_in + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; //tail 变成 flag_in } } else { //page is not find in the page table *vm->pagefault_num_ptr += 1; head_index = vm->invert_page_table[3 * (vm->PAGE_ENTRIES)]; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //我们把东西都搬下去 uint32_t secondary_addr = 32 * vm->invert_page_table[head_index]; for (int n = 0; n < 32; n++) { vm->storage[secondary_addr + n] = vm->buffer[32 * head_index + n]; } vm->invert_page_table[head_index] = page_num; next_index = vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = 0x80000000; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = next_index; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = head_index; vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = head_index; for (int l = 0; l < 32; l++) { vm->buffer[32 * head_index + l] = vm->storage[32 * vm->invert_page_table[head_index] + l]; } value = vm->buffer[32 * head_index + offset]; } return value; //TODO myLock.unlock(); } __device__ void vm_write(VirtualMemory *vm, u32 addr, uchar value) { Lock myLock; int index = threadIdx.x; myLock.lock(); /* Complete vm_write function to write value into data buffer */ uint32_t page_num = addr / 32; uint32_t offset = addr % 32; uint32_t physical_addr; int flag_in = -1; //mark the position that page number can be found int flag_ava = -1; //mark the first available page number position uint32_t next_index; uint32_t prev_index; uint32_t tail_index; uint32_t head_index; for (int i = 0; i < vm->PAGE_ENTRIES; i++) { if (vm->invert_page_table[i] == page_num) { //we find the page number in the page table flag_in = i; break; } } if (flag_in != -1) { //page is in the page table physical_addr = (flag_in) * 32 + offset; vm->buffer[physical_addr] = value; // vm->invert_page_table[flag_in + 2*(vm->PAGE_ENTRIES)] = 0; if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == flag_in && vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] == flag_in) { //如果我们找到的page entry是整个的头,头部尾部都是他 } else if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == flag_in) { //如果我们找到的page entry是整个的头,我们要把他移到尾部去 //把头指向下一个 next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = next_index; //头变成下一个index vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = 0x80000000; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //把tail index的下一个指向我们的pivot vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; //把pivot的下一个变成invalid vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; //tail 变成 flag_in } else if (vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] == flag_in) { } else { //我们找到的page是在中间 prev_index = vm->invert_page_table[flag_in + vm->PAGE_ENTRIES]; next_index = vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[prev_index + 2 * (vm->PAGE_ENTRIES)] = next_index; vm->invert_page_table[next_index + vm->PAGE_ENTRIES] = prev_index; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; //把tail index的下一个指向我们的pivot 
vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_in; //把pivot的下一个变成invalid vm->invert_page_table[flag_in + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = flag_in; //tail 变成 flag_in } } else { //page is not in the page table *vm->pagefault_num_ptr += 1; if (vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] == 0x80000000) { //新写入一个,这个东西在pagetable[0]的位置上 vm->invert_page_table[0] = page_num; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = uint32_t(0); //head vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = uint32_t(0); //tail physical_addr = 0 * 32 + offset; vm->buffer[physical_addr] = value; } else { for (int j = 0; j < vm->PAGE_ENTRIES; j++) { if (vm->invert_page_table[j] == 0x80000000) { flag_ava = j; //we find the first empty entry at pos j break; } } if (flag_ava != -1) { vm->invert_page_table[flag_ava] = page_num; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = flag_ava; vm->invert_page_table[flag_ava + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[flag_ava + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = uint32_t(flag_ava); physical_addr = (flag_ava) * 32 + offset; vm->buffer[physical_addr] = value; } else { //the page table is full now, we have to replace the LRU head_index = vm->invert_page_table[3 * (vm->PAGE_ENTRIES)]; tail_index = vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)]; uint32_t secondary_addr = 32 * vm->invert_page_table[head_index]; for (int l = 0; l < 32; l++) { vm->storage[secondary_addr + l] = vm->buffer[32 * head_index + l]; } vm->invert_page_table[head_index] = page_num; vm->invert_page_table[3 * (vm->PAGE_ENTRIES)] = vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)]; next_index = vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)]; vm->invert_page_table[next_index + (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[tail_index + 2 * (vm->PAGE_ENTRIES)] = head_index; vm->invert_page_table[head_index + (vm->PAGE_ENTRIES)] = tail_index; vm->invert_page_table[head_index + 2 * (vm->PAGE_ENTRIES)] = 0x80000000; vm->invert_page_table[1 + 3 * (vm->PAGE_ENTRIES)] = head_index; vm->buffer[32 * head_index + offset] = value; } } } myLock.unlock(); } __device__ void vm_snapshot(VirtualMemory *vm, uchar *results, int offset, int input_size) { /* Complete snapshot function togther with vm_read to load elements from data * to result buffer */ for (int i = 0; i < input_size; i+=4) { results[i + threadIdx.x] = vm_read(vm, i + threadIdx.x); __syncthreads(); } }
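Both versions of this file fold an LRU list into the single invert_page_table array: the first PAGE_ENTRIES words hold page numbers, the next PAGE_ENTRIES hold previous-links, the next PAGE_ENTRIES hold next-links, and the last two words store the head (least recently used, the eviction victim) and tail (most recently used) indices, with 0x80000000 doubling as the invalid/null marker. The accessors below are not part of the original code; they are only a sketch of that layout to make the index arithmetic in vm_read/vm_write easier to follow.

// Hypothetical helpers describing the invert_page_table layout (illustration only).
#include <stdint.h>

static const uint32_t PT_INVALID = 0x80000000u;  // also used as the null link

static inline uint32_t pt_page(const uint32_t* t, int i)              { return t[i]; }               // page number slot
static inline uint32_t pt_prev(const uint32_t* t, int i, int entries) { return t[i + entries]; }     // LRU prev link
static inline uint32_t pt_next(const uint32_t* t, int i, int entries) { return t[i + 2 * entries]; } // LRU next link
static inline uint32_t pt_head(const uint32_t* t, int entries)        { return t[3 * entries]; }     // least recently used
static inline uint32_t pt_tail(const uint32_t* t, int entries)        { return t[3 * entries + 1]; } // most recently used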
4aeb371a74c63cdaf51edfb6caabe84cce0513ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction kernels */ #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #include <stdio.h> // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n threads - only works for power-of-2 arrays */ /* This reduction interleaves which threads are active by using the modulo operator. This operator is very expensive on GPUs, and the interleaved inactivity means that no whole warps are active, which is also very inefficient */ template <class T> __global__ void reduce0(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2*s)) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses contiguous threads, but its interleaved addressing results in many shared memory bank conflicts. */ template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses sequential addressing -- no divergence or bank conflicts. */ template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses n/2 threads -- it performs the first level of reduction when reading from global memory. */ template <class T> __global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version uses the warp shuffle operation if available to reduce warp synchronization. When shuffle is not available the final warp's worth of work is unrolled to reduce looping overhead. See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ for additional information about using shuffle to perform a reduction within a warp. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version is completely unrolled, unless warp shuffle is available, then shuffle is used within a loop. It uses a template parameter to achieve optimal code for any (power of 2) number of threads. 
This requires a switch statement in the host code to handle all the different thread block sizes at compile time. When shuffle is available, it is used to reduce warp synchronization. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduceSq(T *g_idata, T *g_odata, T *g_odataSq, unsigned int n) { // T *sdata = SharedMemory<T>(); // T *sdataSq = &sdata[64]; __shared__ T sdata[blockSize * 2]; __shared__ T sdataSq[blockSize * 2]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; T mySumSq = 0; T temp; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { temp = g_idata[i]; mySum += temp; mySumSq += temp * temp; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n){ //mySum += g_idata[i+blockSize]; temp = g_idata[i+blockSize]; mySum += temp; mySumSq += temp * temp; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; sdataSq[tid] = mySumSq; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) { mySum += sdata[tid + 32]; mySumSq += sdataSq[tid + 32]; } // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); mySumSq += __shfl_down(mySumSq, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0){ g_odata[blockIdx.x] = mySum; g_odataSq[blockIdx.x] = mySumSq; } } extern "C" bool isPow2(unsigned int x); //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { case 0: hipLaunchKernelGGL(( reduce0<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduce1<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduce2<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 3: hipLaunchKernelGGL(( reduce3<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: switch (threads) { case 512: hipLaunchKernelGGL(( reduce4<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduce4<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce4<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduce4<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduce4<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduce4<T, 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduce4<T, 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduce4<T, 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduce4<T, 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduce4<T, 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } break; case 5: switch (threads) { case 512: hipLaunchKernelGGL(( reduce5<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduce5<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce5<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduce5<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduce5<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduce5<T, 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduce5<T, 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduce5<T, 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduce5<T, 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduce5<T, 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } break; case 6: default: if (isPow2(size)) { switch (threads) { case 512: hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), 
dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } else { switch (threads) { case 512: hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 32: hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 16: hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 8: hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 4: hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 2: hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 1: hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; } } break; } } // Instantiate the reduction function for 3 types template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata); template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata); template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata); template <class T> void reduceSq(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata, T *d_odataSq) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { case 6: default: if (isPow2(size)) { switch (threads) { case 512: hipLaunchKernelGGL(( reduceSq<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 256: hipLaunchKernelGGL(( reduceSq<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 128: hipLaunchKernelGGL(( reduceSq<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 64: hipLaunchKernelGGL(( reduceSq<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 32: hipLaunchKernelGGL(( reduceSq<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 16: hipLaunchKernelGGL(( reduceSq<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 8: hipLaunchKernelGGL(( reduceSq<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 4: hipLaunchKernelGGL(( reduceSq<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 2: hipLaunchKernelGGL(( reduceSq<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 1: hipLaunchKernelGGL(( reduceSq<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; } } else { switch (threads) { case 512: hipLaunchKernelGGL(( reduceSq<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 256: hipLaunchKernelGGL(( reduceSq<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 128: hipLaunchKernelGGL(( reduceSq<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 64: hipLaunchKernelGGL(( reduceSq<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 32: hipLaunchKernelGGL(( reduceSq<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 16: hipLaunchKernelGGL(( reduceSq<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 8: hipLaunchKernelGGL(( reduceSq<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 4: hipLaunchKernelGGL(( reduceSq<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 2: hipLaunchKernelGGL(( reduceSq<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; case 1: hipLaunchKernelGGL(( reduceSq<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, d_odataSq, size); break; } } break; } } template void reduceSq<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata, int *d_odataSq); template void reduceSq<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata, float *d_odataSq); template void reduceSq<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata, double *d_odataSq); #endif // #ifndef _REDUCE_KERNEL_H_
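The reduce<T> wrapper above only produces one partial sum per block, so a complete reduction needs either a second pass over the block results or a small finish on the host. The fragment below is a rough sketch of such a driver, not code from the sample: the device buffers are assumed to be allocated elsewhere, and the 256-thread / capped-block configuration is just one reasonable choice for the multi-element-per-thread kernel (whichKernel = 6).

// Illustrative driver for the reduce<float> wrapper (assumed buffers and sizes).
template <class T>
void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata);

void sum_on_device(float* d_in, float* d_partial, float* d_out, unsigned int n)
{
    int threads = 256;
    int blocks  = (int)((n + (unsigned)(threads * 2) - 1) / (unsigned)(threads * 2)); // reduce6 reads >= 2 elements per thread
    if (blocks > 64) blocks = 64;                       // cap so each thread loops over many elements

    reduce<float>((int)n, threads, blocks, 6, d_in, d_partial);   // one partial sum per block
    reduce<float>(blocks, threads, 1, 6, d_partial, d_out);       // second pass: d_out[0] = total
}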
4aeb371a74c63cdaf51edfb6caabe84cce0513ee.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction kernels */ #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #include <stdio.h> // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T *() { extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double *() { extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const { extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n threads - only works for power-of-2 arrays */ /* This reduction interleaves which threads are active by using the modulo operator. This operator is very expensive on GPUs, and the interleaved inactivity means that no whole warps are active, which is also very inefficient */ template <class T> __global__ void reduce0(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2*s)) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses contiguous threads, but its interleaved addressing results in many shared memory bank conflicts. */ template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses sequential addressing -- no divergence or bank conflicts. */ template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /* This version uses n/2 threads -- it performs the first level of reduction when reading from global memory. 
*/ template <class T> __global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version uses the warp shuffle operation if available to reduce warp synchronization. When shuffle is not available the final warp's worth of work is unrolled to reduce looping overhead. See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ for additional information about using shuffle to perform a reduction within a warp. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize> __global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version is completely unrolled, unless warp shuffle is available, then shuffle is used within a loop. It uses a template parameter to achieve optimal code for any (power of 2) number of threads. This requires a switch statement in the host code to handle all the different thread block sizes at compile time. When shuffle is available, it is used to reduce warp synchronization. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. 
*/ template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduceSq(T *g_idata, T *g_odata, T *g_odataSq, unsigned int n) { // T *sdata = SharedMemory<T>(); // T *sdataSq = &sdata[64]; __shared__ T sdata[blockSize * 2]; __shared__ T sdataSq[blockSize * 2]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; T mySumSq = 0; T temp; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { temp = g_idata[i]; mySum += temp; mySumSq += temp * temp; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n){ //mySum += g_idata[i+blockSize]; temp = g_idata[i+blockSize]; mySum += temp; mySumSq += temp * temp; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; sdataSq[tid] = mySumSq; __syncthreads(); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 256]; } __syncthreads(); if ((blockSize >= 256) &&(tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 128]; } __syncthreads(); if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 64]; } __syncthreads(); #if (__CUDA_ARCH__ >= 300 ) if ( tid < 32 ) { // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) { mySum += sdata[tid + 32]; mySumSq += sdataSq[tid + 32]; } // Reduce final warp using shuffle for (int offset = warpSize/2; offset > 0; offset /= 2) { mySum += __shfl_down(mySum, offset); mySumSq += __shfl_down(mySumSq, offset); } } #else // fully unroll reduction within a single warp if ((blockSize >= 64) && (tid < 32)) { sdata[tid] = mySum = mySum + sdata[tid + 32]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 32]; } __syncthreads(); if ((blockSize >= 32) && (tid < 16)) { sdata[tid] = mySum = mySum + sdata[tid + 16]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 16]; } __syncthreads(); if ((blockSize >= 16) && (tid < 8)) { sdata[tid] = mySum = mySum + sdata[tid + 8]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 8]; } __syncthreads(); if ((blockSize >= 8) && (tid < 4)) { sdata[tid] = mySum = mySum + sdata[tid + 4]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 4]; } __syncthreads(); if ((blockSize >= 4) && (tid < 2)) { sdata[tid] = mySum = mySum + sdata[tid + 2]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 2]; } __syncthreads(); if ((blockSize >= 2) && ( tid < 1)) { sdata[tid] = mySum = mySum + sdata[tid + 1]; sdataSq[tid] = mySumSq = mySumSq + sdataSq[tid + 1]; } __syncthreads(); #endif // write result for this block to global mem if (tid == 0){ g_odata[blockIdx.x] = mySum; g_odataSq[blockIdx.x] = mySumSq; } } extern "C" bool isPow2(unsigned int x); //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { case 0: reduce0<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduce1<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduce2<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 3: reduce3<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: switch (threads) { case 512: reduce4<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduce4<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce4<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduce4<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduce4<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduce4<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduce4<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduce4<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduce4<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduce4<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } break; case 5: switch (threads) { case 512: reduce5<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduce5<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce5<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduce5<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduce5<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduce5<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduce5<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduce5<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduce5<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduce5<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } break; case 6: default: if (isPow2(size)) { switch (threads) { case 512: reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } else { switch (threads) { case 512: reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: reduce6<T, 256, 
false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } break; } } // Instantiate the reduction function for 3 types template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata); template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata); template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata); template <class T> void reduceSq(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata, T *d_odataSq) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { case 6: default: if (isPow2(size)) { switch (threads) { case 512: reduceSq<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 256: reduceSq<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 128: reduceSq<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 64: reduceSq<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 32: reduceSq<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 16: reduceSq<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 8: reduceSq<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 4: reduceSq<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 2: reduceSq<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 1: reduceSq<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; } } else { switch (threads) { case 512: reduceSq<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 256: reduceSq<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 128: reduceSq<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 64: reduceSq<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 32: reduceSq<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 16: reduceSq<T, 16, false><<< dimGrid, dimBlock, smemSize 
>>>(d_idata, d_odata, d_odataSq, size); break; case 8: reduceSq<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 4: reduceSq<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 2: reduceSq<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; case 1: reduceSq<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, d_odataSq, size); break; } } break; } } template void reduceSq<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata, int *d_odataSq); template void reduceSq<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata, float *d_odataSq); template void reduceSq<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata, double *d_odataSq); #endif // #ifndef _REDUCE_KERNEL_H_
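// Usage sketch (hypothetical host code, not part of this header): one way to
// drive reduce<float>() declared above. `threads` must be one of the powers of
// two handled by the switch (<= 512); `blocks` assumes each block of reduce6
// consumes at least 2*threads elements before grid-striding. The per-block
// partial sums are combined on the CPU. isPow2() is only declared extern
// above, so one possible definition is included here to keep the sketch
// self-contained.
#include <cuda_runtime.h>
#include <vector>

extern "C" bool isPow2(unsigned int x) { return (x & (x - 1)) == 0; }

float sum_on_gpu(const std::vector<float> &h_in)
{
    int n       = static_cast<int>(h_in.size());
    int threads = 256;
    int blocks  = (n + threads * 2 - 1) / (threads * 2);

    float *d_in = 0, *d_out = 0;
    cudaMalloc((void **)&d_in,  n * sizeof(float));
    cudaMalloc((void **)&d_out, blocks * sizeof(float));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    reduce<float>(n, threads, blocks, 6, d_in, d_out);   // whichKernel = 6 -> reduce6

    std::vector<float> h_partial(blocks);
    cudaMemcpy(h_partial.data(), d_out, blocks * sizeof(float),
               cudaMemcpyDeviceToHost);

    float total = 0.0f;
    for (int b = 0; b < blocks; ++b) total += h_partial[b];   // finish on host

    cudaFree(d_in);
    cudaFree(d_out);
    return total;
}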
e61bd3a14335534237348edf202b75ebf1361b1f.hip
// !!! This is a file automatically generated by hipify!!!
//xfail:REPAIR_ERROR
//--blockDim=2 --gridDim=1 --no-inline

#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>   // provides hiprandState_t, hiprand_init(), hiprand()

__global__ void init_test(hiprandState_t *state, unsigned int *A)
{
    hiprand_init(0, 0, 0, state);
    __syncthreads();
    if (threadIdx.x == 0) {
        A[0] = hiprand(state);
    }
}
e61bd3a14335534237348edf202b75ebf1361b1f.cu
//xfail:REPAIR_ERROR
//--blockDim=2 --gridDim=1 --no-inline

#include <cuda.h>
#include <curand_kernel.h>   // provides curandState, curand_init(), curand()

__global__ void init_test(curandState *state, unsigned int *A)
{
    curand_init(0, 0, 0, state);
    __syncthreads();
    if (threadIdx.x == 0) {
        A[0] = curand(state);
    }
}
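// Hypothetical host driver (not part of the test case): the //--blockDim=2
// --gridDim=1 annotation above is the launch configuration the verifier
// assumes, so an equivalent runnable launch looks like this. Note that both
// threads call curand_init() on the same curandState, which is presumably the
// data race this xfail test is meant to expose.
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    curandState  *d_state = 0;
    unsigned int *d_A     = 0;
    cudaMalloc(&d_state, sizeof(curandState));
    cudaMalloc(&d_A,     sizeof(unsigned int));

    init_test<<<1, 2>>>(d_state, d_A);          // gridDim = 1, blockDim = 2
    cudaDeviceSynchronize();

    unsigned int h_A = 0;
    cudaMemcpy(&h_A, d_A, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("A[0] = %u\n", h_A);

    cudaFree(d_state);
    cudaFree(d_A);
    return 0;
}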
5389e80696761f07562e581df5ca3f165bad4f7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 0.0002 #define P 0.5 #define G 0.75 #define BLOCKS 32 #define THREADS_PER_BLOCK 64 //Assume square matrix #define MATRIX_DIM 512 __global__ void matrix_init(float *matrix, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x))*tasks; unsigned int position; for (unsigned int i = 0; i < tasks && (i+id) < matrix_size; ++i) { position = i + id; matrix[position] = 0; } } __global__ void hit_the_drum(float* matrix, int x, int y) { matrix[x + y * MATRIX_DIM] = 1; } __global__ void simulate(float* u0, float* u1, float* u2, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x)) * tasks; unsigned int position; unsigned int special_case = 0; for (unsigned int i = 0; (i < tasks) && ((i + id) < matrix_size); ++i) { position = i + id; // All the outer elements should be handled later. ////Check for corner: //if (position == 0) { //(0,0) // //u0[0] = G * u0[1]; //} //else if (position == MATRIX_DIM - 1) { //(N-1,0) // //u0[MATRIX_DIM - 1] = G * u0[MATRIX_DIM - 2]; //} //else if (position == ((MATRIX_DIM - 1) * MATRIX_DIM) || position == (MATRIX_DIM-1 + (MATRIX_DIM - 1) * MATRIX_DIM)) {//(0,N-1),(N-1,N-1) // //u0[position] = G * u0[position - MATRIX_DIM]; //} ////Check for side: //else if ((position % MATRIX_DIM) == 0) {//(0,i) // //u0[position] = G * u0[position + 1]; //} //else if ((position % MATRIX_DIM) == (MATRIX_DIM - 1)) { // (N-1,i) // //u0[position] = G * u0[position - 1]; //} //else if (position < MATRIX_DIM) {//(i,0) // //u0[position] = G * u0[position + MATRIX_DIM]; //} //else if (position >= (MATRIX_DIM - 1) * MATRIX_DIM) {//(i,N-1) // //u0[position] = G * u0[position - MATRIX_DIM]; //} ////Inner elements //else { if(position%MATRIX_DIM > 0 && position%MATRIX_DIM < MATRIX_DIM-1 && position > MATRIX_DIM && position < (MATRIX_DIM-1)*MATRIX_DIM){ u0[position] = P * (u1[position - 1] + u1[position + 1] + u1[position - MATRIX_DIM] + u1[position + MATRIX_DIM] - 4 * u1[position]) + 2 * u1[position] - (1 - N) * u2[position]; u0[position] = u0[position] / (1 + N); //printf("[position]: %f\n", position, u0[position]); } } } __global__ void simulate_sides(float* u0, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x)) * tasks; unsigned int position; unsigned int special_case = 0; for (unsigned int i = 0; (i < tasks) && ((i + id) < matrix_size); ++i) { position = i + id; //Check for corner, which will be computed later. 
if (position == 0) { //(0,0) //u0[0] = G * u0[1]; } else if (position == MATRIX_DIM - 1) { //(N-1,0) //u0[MATRIX_DIM - 1] = G * u0[MATRIX_DIM - 2]; } else if (position == ((MATRIX_DIM - 1) * MATRIX_DIM) || position == (MATRIX_DIM-1 + (MATRIX_DIM - 1) * MATRIX_DIM)) {//(0,N-1),(N-1,N-1) //u0[position] = G * u0[position - MATRIX_DIM]; } //Check for side else if ((position % MATRIX_DIM) == 0) {//(0,i) u0[position] = G * u0[position + 1]; } else if ((position % MATRIX_DIM) == (MATRIX_DIM - 1)) { // (N-1,i) u0[position] = G * u0[position - 1]; } else if (position < MATRIX_DIM) {//(i,0) u0[position] = G * u0[position + MATRIX_DIM]; } else if (position >= (MATRIX_DIM - 1) * MATRIX_DIM) {//(i,N-1) u0[position] = G * u0[position - MATRIX_DIM]; } } } __global__ void simulate_corners(float* u0, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x)) * tasks; unsigned int position; unsigned int special_case = 0; for (unsigned int i = 0; (i < tasks) && ((i + id) < matrix_size); ++i) { position = i + id; //Check for corner: if (position == 0) { //(0,0) u0[0] = G * u0[1]; } else if (position == MATRIX_DIM - 1) { //(N-1,0) u0[MATRIX_DIM - 1] = G * u0[MATRIX_DIM - 2]; } else if (position == ((MATRIX_DIM - 1) * MATRIX_DIM) || position == (MATRIX_DIM - 1 + (MATRIX_DIM - 1) * MATRIX_DIM)) {//(0,N-1),(N-1,N-1) u0[position] = G * u0[position - MATRIX_DIM]; } } } void shift_reference(float* &u0, float* &u1, float* &u2) { hipFree(u2); u2 = u1; u1 = u0; } int main(int argc, char* argv[]) { clock_t start = clock(); if (argc != 2) { printf("Please enter a positive number of iteration as the argument.\n"); return -1; } unsigned int iteration = atoi(argv[1]); float* u0; float* cuda_u0, *cuda_u1, * cuda_u2; float* cuda_dummy; size_t matrix_size_float = MATRIX_DIM * MATRIX_DIM * sizeof(float); unsigned int matrix_size = MATRIX_DIM * MATRIX_DIM; hipError_t cudaStatus; //malloc u0; u0 = (float*)malloc(matrix_size_float); //hipMalloc cudaStatus = hipMalloc((void**)&cuda_u0, matrix_size_float); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } cudaStatus = hipMalloc((void**)&cuda_u1, matrix_size_float); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } cudaStatus = hipMalloc((void**)&cuda_u2, matrix_size_float); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } //Initialize number of tasks per threads. 
unsigned int tasks_thread; if(matrix_size% (BLOCKS * THREADS_PER_BLOCK) == 0) tasks_thread = matrix_size / (BLOCKS * THREADS_PER_BLOCK); else tasks_thread = matrix_size / (BLOCKS * THREADS_PER_BLOCK) + 1; //Initialize matrix to 0 matrix_init << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u1, tasks_thread, matrix_size); matrix_init << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u2, tasks_thread, matrix_size); hipDeviceSynchronize(); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } hit_the_drum << <1, 1 >> > (cuda_u1, MATRIX_DIM/2, MATRIX_DIM/2); hipDeviceSynchronize(); //Simulation for (int i = 0; i < iteration; ++i) { simulate << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u0, cuda_u1, cuda_u2, tasks_thread, matrix_size); simulate_sides << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u0, tasks_thread, matrix_size); simulate_corners << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u0, tasks_thread, matrix_size); hipDeviceSynchronize(); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } shift_reference(cuda_u0, cuda_u1, cuda_u2); //hipMalloc cudaStatus = hipMalloc((void**)&cuda_u0, matrix_size_float); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } cudaStatus = hipMemcpy(u0, cuda_u1, matrix_size_float, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); return -1; } printf("[%d][%d]: %.5f\n", MATRIX_DIM / 2, MATRIX_DIM / 2, u0[MATRIX_DIM / 2 + (MATRIX_DIM / 2) * MATRIX_DIM]); } printf("Alles ist gut, %lu msec passed.\n",clock()-start); hipFree(cuda_u0); hipFree(cuda_u1); hipFree(cuda_u2); free(u0); return 0; }
5389e80696761f07562e581df5ca3f165bad4f7c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 0.0002 #define P 0.5 #define G 0.75 #define BLOCKS 32 #define THREADS_PER_BLOCK 64 //Assume square matrix #define MATRIX_DIM 512 __global__ void matrix_init(float *matrix, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x))*tasks; unsigned int position; for (unsigned int i = 0; i < tasks && (i+id) < matrix_size; ++i) { position = i + id; matrix[position] = 0; } } __global__ void hit_the_drum(float* matrix, int x, int y) { matrix[x + y * MATRIX_DIM] = 1; } __global__ void simulate(float* u0, float* u1, float* u2, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x)) * tasks; unsigned int position; unsigned int special_case = 0; for (unsigned int i = 0; (i < tasks) && ((i + id) < matrix_size); ++i) { position = i + id; // All the outer elements should be handled later. ////Check for corner: //if (position == 0) { //(0,0) // //u0[0] = G * u0[1]; //} //else if (position == MATRIX_DIM - 1) { //(N-1,0) // //u0[MATRIX_DIM - 1] = G * u0[MATRIX_DIM - 2]; //} //else if (position == ((MATRIX_DIM - 1) * MATRIX_DIM) || position == (MATRIX_DIM-1 + (MATRIX_DIM - 1) * MATRIX_DIM)) {//(0,N-1),(N-1,N-1) // //u0[position] = G * u0[position - MATRIX_DIM]; //} ////Check for side: //else if ((position % MATRIX_DIM) == 0) {//(0,i) // //u0[position] = G * u0[position + 1]; //} //else if ((position % MATRIX_DIM) == (MATRIX_DIM - 1)) { // (N-1,i) // //u0[position] = G * u0[position - 1]; //} //else if (position < MATRIX_DIM) {//(i,0) // //u0[position] = G * u0[position + MATRIX_DIM]; //} //else if (position >= (MATRIX_DIM - 1) * MATRIX_DIM) {//(i,N-1) // //u0[position] = G * u0[position - MATRIX_DIM]; //} ////Inner elements //else { if(position%MATRIX_DIM > 0 && position%MATRIX_DIM < MATRIX_DIM-1 && position > MATRIX_DIM && position < (MATRIX_DIM-1)*MATRIX_DIM){ u0[position] = P * (u1[position - 1] + u1[position + 1] + u1[position - MATRIX_DIM] + u1[position + MATRIX_DIM] - 4 * u1[position]) + 2 * u1[position] - (1 - N) * u2[position]; u0[position] = u0[position] / (1 + N); //printf("[position]: %f\n", position, u0[position]); } } } __global__ void simulate_sides(float* u0, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x)) * tasks; unsigned int position; unsigned int special_case = 0; for (unsigned int i = 0; (i < tasks) && ((i + id) < matrix_size); ++i) { position = i + id; //Check for corner, which will be computed later. 
if (position == 0) { //(0,0) //u0[0] = G * u0[1]; } else if (position == MATRIX_DIM - 1) { //(N-1,0) //u0[MATRIX_DIM - 1] = G * u0[MATRIX_DIM - 2]; } else if (position == ((MATRIX_DIM - 1) * MATRIX_DIM) || position == (MATRIX_DIM-1 + (MATRIX_DIM - 1) * MATRIX_DIM)) {//(0,N-1),(N-1,N-1) //u0[position] = G * u0[position - MATRIX_DIM]; } //Check for side else if ((position % MATRIX_DIM) == 0) {//(0,i) u0[position] = G * u0[position + 1]; } else if ((position % MATRIX_DIM) == (MATRIX_DIM - 1)) { // (N-1,i) u0[position] = G * u0[position - 1]; } else if (position < MATRIX_DIM) {//(i,0) u0[position] = G * u0[position + MATRIX_DIM]; } else if (position >= (MATRIX_DIM - 1) * MATRIX_DIM) {//(i,N-1) u0[position] = G * u0[position - MATRIX_DIM]; } } } __global__ void simulate_corners(float* u0, unsigned int tasks, unsigned int matrix_size) { unsigned int id = (threadIdx.x + (blockIdx.x * blockDim.x)) * tasks; unsigned int position; unsigned int special_case = 0; for (unsigned int i = 0; (i < tasks) && ((i + id) < matrix_size); ++i) { position = i + id; //Check for corner: if (position == 0) { //(0,0) u0[0] = G * u0[1]; } else if (position == MATRIX_DIM - 1) { //(N-1,0) u0[MATRIX_DIM - 1] = G * u0[MATRIX_DIM - 2]; } else if (position == ((MATRIX_DIM - 1) * MATRIX_DIM) || position == (MATRIX_DIM - 1 + (MATRIX_DIM - 1) * MATRIX_DIM)) {//(0,N-1),(N-1,N-1) u0[position] = G * u0[position - MATRIX_DIM]; } } } void shift_reference(float* &u0, float* &u1, float* &u2) { cudaFree(u2); u2 = u1; u1 = u0; } int main(int argc, char* argv[]) { clock_t start = clock(); if (argc != 2) { printf("Please enter a positive number of iteration as the argument.\n"); return -1; } unsigned int iteration = atoi(argv[1]); float* u0; float* cuda_u0, *cuda_u1, * cuda_u2; float* cuda_dummy; size_t matrix_size_float = MATRIX_DIM * MATRIX_DIM * sizeof(float); unsigned int matrix_size = MATRIX_DIM * MATRIX_DIM; cudaError_t cudaStatus; //malloc u0; u0 = (float*)malloc(matrix_size_float); //cudaMalloc cudaStatus = cudaMalloc((void**)&cuda_u0, matrix_size_float); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } cudaStatus = cudaMalloc((void**)&cuda_u1, matrix_size_float); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } cudaStatus = cudaMalloc((void**)&cuda_u2, matrix_size_float); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } //Initialize number of tasks per threads. 
unsigned int tasks_thread; if(matrix_size% (BLOCKS * THREADS_PER_BLOCK) == 0) tasks_thread = matrix_size / (BLOCKS * THREADS_PER_BLOCK); else tasks_thread = matrix_size / (BLOCKS * THREADS_PER_BLOCK) + 1; //Initialize matrix to 0 matrix_init << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u1, tasks_thread, matrix_size); matrix_init << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u2, tasks_thread, matrix_size); cudaDeviceSynchronize(); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } hit_the_drum << <1, 1 >> > (cuda_u1, MATRIX_DIM/2, MATRIX_DIM/2); cudaDeviceSynchronize(); //Simulation for (int i = 0; i < iteration; ++i) { simulate << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u0, cuda_u1, cuda_u2, tasks_thread, matrix_size); simulate_sides << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u0, tasks_thread, matrix_size); simulate_corners << <BLOCKS, THREADS_PER_BLOCK >> > (cuda_u0, tasks_thread, matrix_size); cudaDeviceSynchronize(); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } shift_reference(cuda_u0, cuda_u1, cuda_u2); //cudaMalloc cudaStatus = cudaMalloc((void**)&cuda_u0, matrix_size_float); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } cudaStatus = cudaMemcpy(u0, cuda_u1, matrix_size_float, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); return -1; } printf("[%d][%d]: %.5f\n", MATRIX_DIM / 2, MATRIX_DIM / 2, u0[MATRIX_DIM / 2 + (MATRIX_DIM / 2) * MATRIX_DIM]); } printf("Alles ist gut, %lu msec passed.\n",clock()-start); cudaFree(cuda_u0); cudaFree(cuda_u1); cudaFree(cuda_u2); free(u0); return 0; }
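// Reference sketch (hypothetical, CPU-only): the interior update applied by
// simulate() above for a single node (i, j), written out on the host so one
// element can be checked against the GPU result. u1 is the current state, u2
// the previous state, dim the matrix width, and P (0.5) and N (0.0002) are the
// constants #defined above. The G-based edge/corner rules from
// simulate_sides()/simulate_corners() are not repeated here.
static float drum_interior_update(const float *u1, const float *u2,
                                  int i, int j, int dim)
{
    float center  = u1[i * dim + j];
    float laplace = u1[i * dim + (j - 1)] + u1[i * dim + (j + 1)]
                  + u1[(i - 1) * dim + j] + u1[(i + 1) * dim + j]
                  - 4 * center;
    return (P * laplace + 2 * center - (1 - N) * u2[i * dim + j]) / (1 + N);
}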
077db126ddf97113e8c210a58b8a5eb8e654f059.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <hip/hip_runtime.h> #include <omp.h> struct timespec s, e, all_start, all_end; long cpu_time = 0, io_time = 0, communication_time = 0; void calc_time(long* target, struct timespec a, struct timespec b) { int sec = a.tv_sec - b.tv_sec; int nsec = a.tv_nsec - b.tv_nsec; *target += ((long) sec) * 1000000000 + nsec; } __global__ void calPhase1(int B, int Round, int* Dist, int node, int pitch) { extern __shared__ int sdata[]; int x = threadIdx.x; int y = threadIdx.y; int sx = Round*B+x; int sy = Round*B+y; sdata[x*B+y]=Dist[sx*pitch+sy]; __syncthreads(); int tem; #pragma unroll for (int k = 0; k < B ; ++k) { tem=sdata[x*B+k] + sdata[k*B+y]; if (tem < sdata[x*B+y]) sdata[x*B+y] = tem; __syncthreads(); } Dist[sx*pitch+sy]=sdata[x*B+y]; __syncthreads(); } __global__ void calPhase2(int B, int Round, int* Dist, int node, int pitch) { if(blockIdx.x==Round) return; extern __shared__ int sm[]; int* p = &sm[B*B]; int x = threadIdx.x; int y = threadIdx.y; unsigned int sx = Round*B+x; unsigned int sy = Round*B+y; sm[x*B+y]=Dist[sx*pitch+sy]; unsigned int rx = blockIdx.x*B+x; unsigned int cy = blockIdx.x*B+y; unsigned int idx= (blockIdx.y == 1)?rx*pitch+sy:sx*pitch+cy; p[x*B+y]=Dist[idx]; __syncthreads(); int* a =(blockIdx.y == 0)?&sm[0]:p; int* b =(blockIdx.y == 1)?&sm[0]:p; int tem; #pragma unroll for (int k = 0; k < B ; ++k) { tem=a[x*B+k] + b[k*B+y]; if ( tem < p[x*B+y]) p[x*B+y] = tem; } Dist[idx]=p[x*B+y]; } __global__ void calPhase3(int B, int Round, int* Dist, int node, int pitch,int threadId,int halfRound) { int blockIdxx=blockIdx.x; if(threadId==1) blockIdxx=blockIdxx+halfRound; if (blockIdxx == Round || blockIdx.y == Round) return; extern __shared__ int sm[]; int* pr = &sm[0]; int* pc = &sm[B*B]; int x = threadIdx.x; int y = threadIdx.y; int rx = blockIdxx*blockDim.x+x; int ry = Round*B+y; int cx = Round*B+x; int cy = blockIdx.y*blockDim.y+y; pr[x*B+y]=Dist[rx*pitch+ry]; pc[x*B+y]=Dist[cx*pitch+cy]; __syncthreads(); if (rx >= node || cy >= node) return; int tem; int ans=Dist[rx*pitch+cy] ; #pragma omp parallel for for (int k = 0; k < B ; ++k) { tem=pr[x*B+k] + pc[k*B+y]; if ( tem<ans){ ans=tem; } } Dist[rx*pitch+cy] = ans; } int main(int argc, char* argv[]) { //input(argv[1]); FILE *fp = fopen(argv[1], "rb"); int n, edge; clock_gettime(CLOCK_REALTIME, &s); fread(&n , sizeof(int), 1, fp); fread(&edge, sizeof(int), 1, fp); clock_gettime(CLOCK_REALTIME, &e); calc_time(&io_time, e, s); int B = (n>32)?32:16;//atoi(argv[3]); int round = (n + B -1)/B; int pitch_n = round*B;//(n%B==0)?n:n-n%B+B; int* Dist;//=(int*) malloc(pitch_n * pitch_n * sizeof(int)); hipHostMalloc(&Dist, sizeof(int)*pitch_n*pitch_n); clock_gettime(CLOCK_REALTIME, &s); #pragma omp parallel for collapse(2) for (int i = 0; i < pitch_n; ++i) { for (int j = 0; j < pitch_n; ++j) { if (i == j) Dist[i*pitch_n+j] = 0; else Dist[i*pitch_n+j] = 1000000000; } } clock_gettime(CLOCK_REALTIME, &e); calc_time(&cpu_time, e, s); int* temp =(int*) malloc(edge * 3 * sizeof(int)); clock_gettime(CLOCK_REALTIME, &s); fread(temp, sizeof(int), edge * 3, fp); clock_gettime(CLOCK_REALTIME, &e); calc_time(&io_time, e, s); clock_gettime(CLOCK_REALTIME, &s); #pragma omp parallel for for (int i = 0; i < edge*3; i=i+3) Dist[temp[i]*pitch_n+temp[i+1]] = temp[i+2]; clock_gettime(CLOCK_REALTIME, &e); calc_time(&cpu_time, e, s); //block_FW(B); float time; float GPU_time = 0; hipEvent_t start, stop; hipEventCreate (&start); hipEventCreate 
(&stop); int* devDist[2]; #pragma omp parallel num_threads(2) { int threadId = omp_get_thread_num(); //hipSetDevice(threadId); hipMalloc(&devDist[threadId], sizeof(int) * pitch_n * pitch_n); hipMemcpy(devDist[threadId], Dist, sizeof(int) * pitch_n * pitch_n, hipMemcpyHostToDevice); dim3 grid1(1, 1); dim3 grid2(round, 2); dim3 grid3(round, round); if(threadId == 0) grid3.x = round/2; else grid3.x = round-(round/2); dim3 block(B, B); int sSize = B * B * sizeof(int); hipEventRecord (start, 0); for (int r = 0; r < round; ++r) { hipLaunchKernelGGL(( calPhase1), dim3(grid1), dim3(block), sSize , 0, B, r, devDist[threadId], n, pitch_n); hipLaunchKernelGGL(( calPhase2), dim3(grid2), dim3(block), sSize*2, 0, B, r, devDist[threadId], n, pitch_n); hipLaunchKernelGGL(( calPhase3), dim3(grid3), dim3(block), sSize*2, 0, B, r, devDist[threadId], n, pitch_n,threadId,round/2); hipDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &s); #pragma omp barrier if(threadId==0) hipMemcpyPeer(devDist[1], 1, devDist[0], 0, round/2*B*sizeof(int) * pitch_n); else hipMemcpyPeer(&devDist[0][round/2 *pitch_n*B], 0, &devDist[1][round/2 *pitch_n*B], 1, (round-round/2)*B*sizeof(int) * pitch_n); #pragma omp barrier clock_gettime(CLOCK_REALTIME, &e); calc_time(&communication_time, e, s); } } hipEventRecord (stop, 0); hipEventElapsedTime (&time, start, stop); GPU_time = time/1000 - (communication_time/1000000000.0); hipDeviceSynchronize(); hipMemcpy2D(Dist, sizeof(int) *n, devDist[0], sizeof(int) * pitch_n, sizeof(int) *n, n, hipMemcpyDeviceToHost); //output(argv[2]); fp = fopen(argv[2], "wb+"); clock_gettime(CLOCK_REALTIME, &s); fwrite(Dist, sizeof(int), n*n, fp); clock_gettime(CLOCK_REALTIME, &e); calc_time(&io_time, e, s); printf("io_time:%lf\n",(io_time/1000000000.0)); printf ("GPU time = %lf\n", GPU_time); printf("communication time = %lf\n",(communication_time/1000000000.0)); printf("cpu_time:%lf\n",(cpu_time/1000000000.0)); fclose(fp); return 0; }
077db126ddf97113e8c210a58b8a5eb8e654f059.cu
#include <stdio.h> #include <stdlib.h> #include <limits.h> #include <cuda_runtime.h> #include <omp.h> struct timespec s, e, all_start, all_end; long cpu_time = 0, io_time = 0, communication_time = 0; void calc_time(long* target, struct timespec a, struct timespec b) { int sec = a.tv_sec - b.tv_sec; int nsec = a.tv_nsec - b.tv_nsec; *target += ((long) sec) * 1000000000 + nsec; } __global__ void calPhase1(int B, int Round, int* Dist, int node, int pitch) { extern __shared__ int sdata[]; int x = threadIdx.x; int y = threadIdx.y; int sx = Round*B+x; int sy = Round*B+y; sdata[x*B+y]=Dist[sx*pitch+sy]; __syncthreads(); int tem; #pragma unroll for (int k = 0; k < B ; ++k) { tem=sdata[x*B+k] + sdata[k*B+y]; if (tem < sdata[x*B+y]) sdata[x*B+y] = tem; __syncthreads(); } Dist[sx*pitch+sy]=sdata[x*B+y]; __syncthreads(); } __global__ void calPhase2(int B, int Round, int* Dist, int node, int pitch) { if(blockIdx.x==Round) return; extern __shared__ int sm[]; int* p = &sm[B*B]; int x = threadIdx.x; int y = threadIdx.y; unsigned int sx = Round*B+x; unsigned int sy = Round*B+y; sm[x*B+y]=Dist[sx*pitch+sy]; unsigned int rx = blockIdx.x*B+x; unsigned int cy = blockIdx.x*B+y; unsigned int idx= (blockIdx.y == 1)?rx*pitch+sy:sx*pitch+cy; p[x*B+y]=Dist[idx]; __syncthreads(); int* a =(blockIdx.y == 0)?&sm[0]:p; int* b =(blockIdx.y == 1)?&sm[0]:p; int tem; #pragma unroll for (int k = 0; k < B ; ++k) { tem=a[x*B+k] + b[k*B+y]; if ( tem < p[x*B+y]) p[x*B+y] = tem; } Dist[idx]=p[x*B+y]; } __global__ void calPhase3(int B, int Round, int* Dist, int node, int pitch,int threadId,int halfRound) { int blockIdxx=blockIdx.x; if(threadId==1) blockIdxx=blockIdxx+halfRound; if (blockIdxx == Round || blockIdx.y == Round) return; extern __shared__ int sm[]; int* pr = &sm[0]; int* pc = &sm[B*B]; int x = threadIdx.x; int y = threadIdx.y; int rx = blockIdxx*blockDim.x+x; int ry = Round*B+y; int cx = Round*B+x; int cy = blockIdx.y*blockDim.y+y; pr[x*B+y]=Dist[rx*pitch+ry]; pc[x*B+y]=Dist[cx*pitch+cy]; __syncthreads(); if (rx >= node || cy >= node) return; int tem; int ans=Dist[rx*pitch+cy] ; #pragma omp parallel for for (int k = 0; k < B ; ++k) { tem=pr[x*B+k] + pc[k*B+y]; if ( tem<ans){ ans=tem; } } Dist[rx*pitch+cy] = ans; } int main(int argc, char* argv[]) { //input(argv[1]); FILE *fp = fopen(argv[1], "rb"); int n, edge; clock_gettime(CLOCK_REALTIME, &s); fread(&n , sizeof(int), 1, fp); fread(&edge, sizeof(int), 1, fp); clock_gettime(CLOCK_REALTIME, &e); calc_time(&io_time, e, s); int B = (n>32)?32:16;//atoi(argv[3]); int round = (n + B -1)/B; int pitch_n = round*B;//(n%B==0)?n:n-n%B+B; int* Dist;//=(int*) malloc(pitch_n * pitch_n * sizeof(int)); cudaMallocHost(&Dist, sizeof(int)*pitch_n*pitch_n); clock_gettime(CLOCK_REALTIME, &s); #pragma omp parallel for collapse(2) for (int i = 0; i < pitch_n; ++i) { for (int j = 0; j < pitch_n; ++j) { if (i == j) Dist[i*pitch_n+j] = 0; else Dist[i*pitch_n+j] = 1000000000; } } clock_gettime(CLOCK_REALTIME, &e); calc_time(&cpu_time, e, s); int* temp =(int*) malloc(edge * 3 * sizeof(int)); clock_gettime(CLOCK_REALTIME, &s); fread(temp, sizeof(int), edge * 3, fp); clock_gettime(CLOCK_REALTIME, &e); calc_time(&io_time, e, s); clock_gettime(CLOCK_REALTIME, &s); #pragma omp parallel for for (int i = 0; i < edge*3; i=i+3) Dist[temp[i]*pitch_n+temp[i+1]] = temp[i+2]; clock_gettime(CLOCK_REALTIME, &e); calc_time(&cpu_time, e, s); //block_FW(B); float time; float GPU_time = 0; cudaEvent_t start, stop; cudaEventCreate (&start); cudaEventCreate (&stop); int* devDist[2]; #pragma omp parallel num_threads(2) 
{ int threadId = omp_get_thread_num(); //cudaSetDevice(threadId); cudaMalloc(&devDist[threadId], sizeof(int) * pitch_n * pitch_n); cudaMemcpy(devDist[threadId], Dist, sizeof(int) * pitch_n * pitch_n, cudaMemcpyHostToDevice); dim3 grid1(1, 1); dim3 grid2(round, 2); dim3 grid3(round, round); if(threadId == 0) grid3.x = round/2; else grid3.x = round-(round/2); dim3 block(B, B); int sSize = B * B * sizeof(int); cudaEventRecord (start, 0); for (int r = 0; r < round; ++r) { calPhase1<<<grid1, block, sSize >>>(B, r, devDist[threadId], n, pitch_n); calPhase2<<<grid2, block, sSize*2>>>(B, r, devDist[threadId], n, pitch_n); calPhase3<<<grid3, block, sSize*2>>>(B, r, devDist[threadId], n, pitch_n,threadId,round/2); cudaDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &s); #pragma omp barrier if(threadId==0) cudaMemcpyPeer(devDist[1], 1, devDist[0], 0, round/2*B*sizeof(int) * pitch_n); else cudaMemcpyPeer(&devDist[0][round/2 *pitch_n*B], 0, &devDist[1][round/2 *pitch_n*B], 1, (round-round/2)*B*sizeof(int) * pitch_n); #pragma omp barrier clock_gettime(CLOCK_REALTIME, &e); calc_time(&communication_time, e, s); } } cudaEventRecord (stop, 0); cudaEventElapsedTime (&time, start, stop); GPU_time = time/1000 - (communication_time/1000000000.0); cudaDeviceSynchronize(); cudaMemcpy2D(Dist, sizeof(int) *n, devDist[0], sizeof(int) * pitch_n, sizeof(int) *n, n, cudaMemcpyDeviceToHost); //output(argv[2]); fp = fopen(argv[2], "wb+"); clock_gettime(CLOCK_REALTIME, &s); fwrite(Dist, sizeof(int), n*n, fp); clock_gettime(CLOCK_REALTIME, &e); calc_time(&io_time, e, s); printf("io_time:%lf\n",(io_time/1000000000.0)); printf ("GPU time = %lf\n", GPU_time); printf("communication time = %lf\n",(communication_time/1000000000.0)); printf("cpu_time:%lf\n",(cpu_time/1000000000.0)); fclose(fp); return 0; }
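// Reference sketch (hypothetical): a plain O(n^3) Floyd-Warshall on the host,
// useful for validating the three-phase blocked GPU version above on small
// inputs. `dist` is the same row-major n x n matrix, initialised with 0 on the
// diagonal, edge weights where given, and the 1000000000 sentinel used in the
// code above everywhere else.
static void floyd_warshall_cpu(int *dist, int n)
{
    for (int k = 0; k < n; ++k)
        for (int i = 0; i < n; ++i)
            for (int j = 0; j < n; ++j)
            {
                int via_k = dist[i * n + k] + dist[k * n + j];
                if (via_k < dist[i * n + j])
                    dist[i * n + j] = via_k;   // relax i -> k -> j
            }
}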
1ae6809ee3dd2714fb494e945ebf47938700438a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This code generates random Erdos Renyi graph using cuda. The corresponding author is Sadegh Nobari If use please cite: @inproceedings{Nobari:2011, author = {Nobari, Sadegh and Lu, Xuesong and Karras, Panagiotis and Bressan, St\'{e}phane}, title = {Fast random graph generation}, booktitle = {Proceedings of the 14th International Conference on Extending Database Technology}, series = {EDBT/ICDT '11}, year = {2011}, isbn = {978-1-4503-0528-0}, location = {Uppsala, Sweden}, pages = {331--342}, numpages = {12}, url = {http://doi.acm.org/10.1145/1951365.1951406}, doi = {http://doi.acm.org/10.1145/1951365.1951406}, acmid = {1951406}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {Erd\H{o}s-r\'{e}nyi, Gilbert, parallel algorithm, random graphs}, } Last update 19 Jun 2011 */ /* After introducing the CURAND library in 2011, it is recommended to use the CURAND uniform random number generator. please check the comments in the initialization sections for more detail. In kernel function RND simply generetes a uniform random number. */ #ifndef _FastGG_KERNEL_H_ #define _FastGG_KERNEL_H_ #include "FastGG.h" #define RND hiprand_uniform(&localState) //Output range excludes 0.0f but includes 1.0f //////////////////////////////////////////////////////////////////////////////// //! RNG init kernel //////////////////////////////////////////////////////////////////////////////// __global__ void initRNG(hiprandState_t * const rngStates, const unsigned int seed) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Initialise the RNG hiprand_init(seed, tid, 0, &rngStates[tid]); } //////////////////////////////////////////////////////////////////////////////// //! PER Kernel //////////////////////////////////////////////////////////////////////////////// __global__ void Kernel_PER( UINT Seed,VTYPE * EdgeList,VTYPE * Valids,VTYPE pXMaxRND,UINT ItemsPerThread,VTYPE NumItems) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data;//,c; //const UINT cycles=10; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid*16807; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread; pos++) { //Generating Random Number data=(data*A)%M; if(data<pXMaxRND) { EdgeList[pos]=pos; Valids[pos]=1; } else Valids[pos]=0; } } //////////////////////////////////////////////////////////////////////////////// //! 
PXER Kernel //////////////////////////////////////////////////////////////////////////////// __global__ void Kernel_PZER( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid*16807; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; //computing skip and wrtie it to global memory if(log1p==0 && tid==0) Results[0]=M-1; else if(log1p==M) Results[pos]=1; else Results[pos]=ceil( (log((double)data)*log1p)-logPmax)+1; } } //////////////////////////////////////////////////////////////////////////////// //! PPreZER Kernels for precomputations 1 to 10 //////////////////////////////////////////////////////////////////////////////// __global__ void Kernel_PPreZER10( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7,VTYPE precomp8,VTYPE precomp9,VTYPE precomp10) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else if(skip<precomp8) skip=8; else if(skip<precomp9) skip=9; else if(skip<precomp10) skip=10; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER9( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7,VTYPE precomp8,VTYPE precomp9) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else if(skip<precomp8) skip=8; else if(skip<precomp9) skip=9; else { if(log1p==0) skip=M; else 
skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER8( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7,VTYPE precomp8) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else if(skip<precomp8) skip=8; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER7( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER6( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER5( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float 
log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER4( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER3( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER2( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); 
data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER1( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } //////////////////////////////////////////////////////////////////////////////// //! Kernel Invokers //////////////////////////////////////////////////////////////////////////////// void initRNGInvoker( dim3 dimGrid, dim3 dimBlock, hiprandState_t * const rngStates, const unsigned int seed) { hipLaunchKernelGGL(( initRNG), dim3(dimGrid), dim3(dimBlock) , 0, 0, rngStates, seed ); } void PER_Invoker( dim3 dimGrid,dim3 dimBlock,UINT Seed,VTYPE * EdgeList,VTYPE * Valids,VTYPE pXMaxRND,UINT ItemsPerThread,VTYPE NumItems) { hipLaunchKernelGGL(( Kernel_PER), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,EdgeList,Valids,pXMaxRND,ItemsPerThread,NumItems); } void PZER_Invoker( dim3 dimGrid,dim3 dimBlock,UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems) { hipLaunchKernelGGL(( Kernel_PZER), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems); } void PPreZER_Invoker( dim3 dimGrid,dim3 dimBlock,UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE * pres,VTYPE numpre) { if(numpre==10) hipLaunchKernelGGL(( Kernel_PPreZER10), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6],pres[7],pres[8],pres[9]); else if(numpre==9) hipLaunchKernelGGL(( Kernel_PPreZER9), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6],pres[7],pres[8]); else if(numpre==8) hipLaunchKernelGGL(( Kernel_PPreZER8), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6],pres[7]); else if(numpre==7) hipLaunchKernelGGL(( Kernel_PPreZER7), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6]); else if(numpre==6) hipLaunchKernelGGL(( Kernel_PPreZER6), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5]); else if(numpre==5) hipLaunchKernelGGL(( Kernel_PPreZER5), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4]); else if(numpre==4) hipLaunchKernelGGL(( 
Kernel_PPreZER4), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3]); else if(numpre==3) hipLaunchKernelGGL(( Kernel_PPreZER3), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2]); else if(numpre==2) hipLaunchKernelGGL(( Kernel_PPreZER2), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1]); else hipLaunchKernelGGL(( Kernel_PPreZER1), dim3(dimGrid), dim3(dimBlock) , 0, 0, Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0]); } #endif // #ifndef _FastGG_KERNEL_H_
1ae6809ee3dd2714fb494e945ebf47938700438a.cu
/* This code generates random Erdos Renyi graph using cuda. The corresponding author is Sadegh Nobari If use please cite: @inproceedings{Nobari:2011, author = {Nobari, Sadegh and Lu, Xuesong and Karras, Panagiotis and Bressan, St\'{e}phane}, title = {Fast random graph generation}, booktitle = {Proceedings of the 14th International Conference on Extending Database Technology}, series = {EDBT/ICDT '11}, year = {2011}, isbn = {978-1-4503-0528-0}, location = {Uppsala, Sweden}, pages = {331--342}, numpages = {12}, url = {http://doi.acm.org/10.1145/1951365.1951406}, doi = {http://doi.acm.org/10.1145/1951365.1951406}, acmid = {1951406}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {Erd\H{o}s-r\'{e}nyi, Gilbert, parallel algorithm, random graphs}, } Last update 19 Jun 2011 */ /* After introducing the CURAND library in 2011, it is recommended to use the CURAND uniform random number generator. please check the comments in the initialization sections for more detail. In kernel function RND simply generetes a uniform random number. */ #ifndef _FastGG_KERNEL_H_ #define _FastGG_KERNEL_H_ #include "FastGG.h" #define RND curand_uniform(&localState) //Output range excludes 0.0f but includes 1.0f //////////////////////////////////////////////////////////////////////////////// //! RNG init kernel //////////////////////////////////////////////////////////////////////////////// __global__ void initRNG(curandState * const rngStates, const unsigned int seed) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Initialise the RNG curand_init(seed, tid, 0, &rngStates[tid]); } //////////////////////////////////////////////////////////////////////////////// //! PER Kernel //////////////////////////////////////////////////////////////////////////////// __global__ void Kernel_PER( UINT Seed,VTYPE * EdgeList,VTYPE * Valids,VTYPE pXMaxRND,UINT ItemsPerThread,VTYPE NumItems) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data;//,c; //const UINT cycles=10; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid*16807; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread; pos++) { //Generating Random Number data=(data*A)%M; if(data<pXMaxRND) { EdgeList[pos]=pos; Valids[pos]=1; } else Valids[pos]=0; } } //////////////////////////////////////////////////////////////////////////////// //! PXER Kernel //////////////////////////////////////////////////////////////////////////////// __global__ void Kernel_PZER( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid*16807; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; //computing skip and wrtie it to global memory if(log1p==0 && tid==0) Results[0]=M-1; else if(log1p==M) Results[pos]=1; else Results[pos]=ceil( (log((double)data)*log1p)-logPmax)+1; } } //////////////////////////////////////////////////////////////////////////////// //! 
PPreZER Kernels for precomputations 1 to 10 //////////////////////////////////////////////////////////////////////////////// __global__ void Kernel_PPreZER10( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7,VTYPE precomp8,VTYPE precomp9,VTYPE precomp10) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else if(skip<precomp8) skip=8; else if(skip<precomp9) skip=9; else if(skip<precomp10) skip=10; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER9( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7,VTYPE precomp8,VTYPE precomp9) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else if(skip<precomp8) skip=8; else if(skip<precomp9) skip=9; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER8( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7,VTYPE precomp8) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else 
if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else if(skip<precomp8) skip=8; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER7( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6,VTYPE precomp7) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else if(skip<precomp7) skip=7; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER6( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5,VTYPE precomp6) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else if(skip<precomp6) skip=6; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER5( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4,VTYPE precomp5) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else if(skip<precomp5) skip=5; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER4( UINT 
Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3,VTYPE precomp4) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else if(skip<precomp4) skip=4; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER3( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2,VTYPE precomp3) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else if(skip<precomp3) skip=3; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER2( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1,VTYPE precomp2) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; else if(skip<precomp2) skip=2; else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } __global__ void Kernel_PPreZER1( UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE precomp1) { const UINT tid = threadIdx.x + blockIdx.x * blockDim.x; //Initialization VTYPE pos, data,skip; //const UINT cycles=3; const unsigned long long A = 16807; //ie 7**5 data = Seed+tid; pos = tid*ItemsPerThread; //Skip the first Cycle numbers //for (c=1; c<=cycles ; c++) //{ data=(data*A)%M; data=(data*A)%M; data=(data*A)%M; //} for(; pos < (tid+1)*ItemsPerThread & pos<NumItems; pos++) { //Generating Random Number //temp = data * A; //data = temp%M;//temp - M * floor ( temp * reciprocal_m ); data=(data*A)%M; skip=M-data; //computing skip and wrtie it to global memory if(skip<precomp1) skip=1; 
else { if(log1p==0) skip=M; else skip=ceil( (log((double)data)*log1p)-logPmax)+1; } Results[pos]=skip; } } //////////////////////////////////////////////////////////////////////////////// //! Kernel Invokers //////////////////////////////////////////////////////////////////////////////// void initRNGInvoker( dim3 dimGrid, dim3 dimBlock, curandState * const rngStates, const unsigned int seed) { initRNG<<< dimGrid, dimBlock >>>( rngStates, seed ); } void PER_Invoker( dim3 dimGrid,dim3 dimBlock,UINT Seed,VTYPE * EdgeList,VTYPE * Valids,VTYPE pXMaxRND,UINT ItemsPerThread,VTYPE NumItems) { Kernel_PER<<< dimGrid, dimBlock >>>(Seed,EdgeList,Valids,pXMaxRND,ItemsPerThread,NumItems); } void PZER_Invoker( dim3 dimGrid,dim3 dimBlock,UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems) { Kernel_PZER<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems); } void PPreZER_Invoker( dim3 dimGrid,dim3 dimBlock,UINT Seed,VTYPE * Results,UINT ItemsPerThread, float log1p ,float logPmax,VTYPE offset,VTYPE NumItems,VTYPE * pres,VTYPE numpre) { if(numpre==10) Kernel_PPreZER10<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6],pres[7],pres[8],pres[9]); else if(numpre==9) Kernel_PPreZER9<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6],pres[7],pres[8]); else if(numpre==8) Kernel_PPreZER8<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6],pres[7]); else if(numpre==7) Kernel_PPreZER7<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5],pres[6]); else if(numpre==6) Kernel_PPreZER6<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4],pres[5]); else if(numpre==5) Kernel_PPreZER5<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3],pres[4]); else if(numpre==4) Kernel_PPreZER4<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2],pres[3]); else if(numpre==3) Kernel_PPreZER3<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1],pres[2]); else if(numpre==2) Kernel_PPreZER2<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0],pres[1]); else Kernel_PPreZER1<<< dimGrid, dimBlock >>>(Seed,Results,ItemsPerThread,log1p,logPmax,offset,NumItems,pres[0]); } #endif // #ifndef _FastGG_KERNEL_H_
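A note on the constants fed to the PZER/PPreZER kernels above: the host code that computes them is not part of this file, so the sketch below reconstructs them from the kernel arithmetic and should be read as an assumption, not as the authors' exact code. The kernels evaluate a geometric skip ceil(log(u)/log(1-p))+1 with u = data/M (M being the LCG modulus from FastGG.h, presumably the Park-Miller value 2147483647), which matches ceil(log(data)*log1p - logPmax)+1 when log1p = 1/log(1-p) and logPmax = log(M)/log(1-p); the PPreZER thresholds resolve the most frequent small skips by comparing M-data against scaled cumulative probabilities.

#include <cmath>
#include <vector>

// Hypothetical host-side helper; M_LCG stands in for the M macro defined in FastGG.h.
void PrepareSkipConstants(double p, double M_LCG,
                          float& log1p_out, float& logPmax_out,
                          std::vector<double>& precomp, int numpre)
{
    const double logq = std::log(1.0 - p);             // log(1-p), negative for 0 < p < 1
    log1p_out   = (float)(1.0 / logq);                 // passed to the kernels as "log1p"
    logPmax_out = (float)(std::log(M_LCG) / logq);     // passed as "logPmax"

    // PPreZER thresholds: precomp[i-1] ~ M * (1 - (1-p)^i), the geometric-skip CDF
    // scaled to the LCG range, so "skip < precomp_i" in the kernels picks skip = i
    // without evaluating any logarithm. The exact index alignment with the
    // logarithmic fallback path may differ by one in the original code.
    precomp.resize(numpre);
    for (int i = 1; i <= numpre; ++i)
        precomp[i - 1] = M_LCG * (1.0 - std::pow(1.0 - p, i));
}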
f34862f6011724b85ac55fc7baf0c57093b471ea.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}

__global__ void uplo_equals_no_transp (const int sd, const int unit, const int bottom,
                                       const REAL* a, const int offset_a, const int ld_a,
                                       const REAL* b, const int offset_b, const int ld_b,
                                       int* eq_flag) {

    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < sd);
    const bool check = valid &&
        ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
    if (check) {
        const int ia = offset_a + gid_0 + gid_1 * ld_a;
        const int ib = offset_b + gid_0 + gid_1 * ld_b;
        if (a[ia] != b[ib]){
            eq_flag[0]++;
        }
    }
}
f34862f6011724b85ac55fc7baf0c57093b471ea.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif } __global__ void uplo_equals_no_transp (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, int* eq_flag) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { const int ia = offset_a + gid_0 + gid_1 * ld_a; const int ib = offset_b + gid_0 + gid_1 * ld_b; if (a[ia] != b[ib]){ eq_flag[0]++; } } }
bd450091d794043eae621cd4f3160b9b77984f67.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void addScalarInArrayInPlace(float* in, float* add, float scale, int size) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;

    for (; tid < size; tid += stride)
        if (tid < size)
            in[tid] += add[0] * scale;
}
bd450091d794043eae621cd4f3160b9b77984f67.cu
#include "includes.h" __global__ void addScalarInArrayInPlace(float* in, float* add, float scale, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in[tid] += add[0] * scale; }
119875cfd1353898d733225e8b8a226e5ccbe768.hip
// !!! This is a file automatically generated by hipify!!! //##################################################################### // Copyright 2011, Valeria Nikolaenko // This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt. //##################################################################### #include <optix_world.h> #include <optixu/optixu_math_namespace.h> #include "OPTIX_COMMONSTRUCTS.h" #include "OPTIX_HELPERS.h" #include "OPTIX_RAY_STRUCTS.h" using namespace optix; rtTextureSampler<float, 3, hipReadModeElementType> smoke_dencities_tex; rtDeclareVariable(float3, low_corner, , ); rtDeclareVariable(float3, up_corner, , ); rtDeclareVariable(float, scene_epsilon, , ); rtDeclareVariable(float, step, , ); rtDeclareVariable(float, color_multiplier, , ); // rtDeclareVariable(float, exp_multiplier, , ); // rtDeclareVariable(rtObject, smoke_object, , ); rtDeclareVariable(rtObject, top_opaque_object, , ); rtDeclareVariable(PerRayData_photon, prd, rtPayload, ); rtDeclareVariable(float, t_hit, rtIntersectionDistance, ); rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); //rtDeclareVariable(float, distance, attribute distance, ); rtDeclareVariable(float, front_box_hit_point, attribute front_box_hit_point, ); __device__ bool isInsideDomain(float3 point) { return !(point.x < low_corner.x || point.x > up_corner.x || point.y < low_corner.y || point.y > up_corner.y || point.z < low_corner.z || point.z > up_corner.z); } __device__ float3 operator*(float3 a, uint3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } __device__ float getIntensity(float3 point) { if (!isInsideDomain(point)) { return -1.f; } float3 tex_coord = (point - low_corner) / (up_corner - low_corner); return tex3D(smoke_dencities_tex, tex_coord.x, tex_coord.y, tex_coord.z); } RT_PROGRAM void any_hit_shadow() { // phongShadowed(); rtIgnoreIntersection(); } RT_PROGRAM void closest_hit_radiance() { float3 hit_point = ray.origin + (t_hit + scene_epsilon) * ray.direction; /* float t = t_hit - front_box_hit_point; if (t > scene_epsilon || t < -scene_epsilon) { prd.result = make_float3(1.f, 0.f, 0.f); return; }*/ PerRayData_radiance new_prd; new_prd.depth = 0; new_prd.importance = 1.f; optix::Ray next_ray = optix::make_Ray(ray.origin, ray.direction, 0, scene_epsilon, RT_DEFAULT_MAX); rtTrace(top_opaque_object, next_ray, new_prd); if (!isInsideDomain(hit_point)) { prd.result = 0.f; return; } if (new_prd.distance > 0 && new_prd.distance < prd.stop_distance) { prd.result = 0.f; return; } float density = getIntensity(hit_point); float accumulative_density = 0; float accumulative_step = front_box_hit_point; do { accumulative_density += density * step; // hit_point += ray.direction * step; density = getIntensity(hit_point); accumulative_step += step; } while (density >= 0.f && accumulative_step < prd.stop_distance); float res = color_multiplier * accumulative_density;//exp(-density_multiplier * accumulative_density); if (res > 1.0f) { res = 1.0f; } else if (res < 0.f) { res = 0.f; } prd.result = /*getIntensity(ray.origin + prd.stop_distance * ray.direction) * */(1.0f - res); }
119875cfd1353898d733225e8b8a226e5ccbe768.cu
//##################################################################### // Copyright 2011, Valeria Nikolaenko // This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt. //##################################################################### #include <optix_world.h> #include <optixu/optixu_math_namespace.h> #include "OPTIX_COMMONSTRUCTS.h" #include "OPTIX_HELPERS.h" #include "OPTIX_RAY_STRUCTS.h" using namespace optix; rtTextureSampler<float, 3, cudaReadModeElementType> smoke_dencities_tex; rtDeclareVariable(float3, low_corner, , ); rtDeclareVariable(float3, up_corner, , ); rtDeclareVariable(float, scene_epsilon, , ); rtDeclareVariable(float, step, , ); rtDeclareVariable(float, color_multiplier, , ); // rtDeclareVariable(float, exp_multiplier, , ); // rtDeclareVariable(rtObject, smoke_object, , ); rtDeclareVariable(rtObject, top_opaque_object, , ); rtDeclareVariable(PerRayData_photon, prd, rtPayload, ); rtDeclareVariable(float, t_hit, rtIntersectionDistance, ); rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); //rtDeclareVariable(float, distance, attribute distance, ); rtDeclareVariable(float, front_box_hit_point, attribute front_box_hit_point, ); __device__ bool isInsideDomain(float3 point) { return !(point.x < low_corner.x || point.x > up_corner.x || point.y < low_corner.y || point.y > up_corner.y || point.z < low_corner.z || point.z > up_corner.z); } __device__ float3 operator*(float3 a, uint3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } __device__ float getIntensity(float3 point) { if (!isInsideDomain(point)) { return -1.f; } float3 tex_coord = (point - low_corner) / (up_corner - low_corner); return tex3D(smoke_dencities_tex, tex_coord.x, tex_coord.y, tex_coord.z); } RT_PROGRAM void any_hit_shadow() { // phongShadowed(); rtIgnoreIntersection(); } RT_PROGRAM void closest_hit_radiance() { float3 hit_point = ray.origin + (t_hit + scene_epsilon) * ray.direction; /* float t = t_hit - front_box_hit_point; if (t > scene_epsilon || t < -scene_epsilon) { prd.result = make_float3(1.f, 0.f, 0.f); return; }*/ PerRayData_radiance new_prd; new_prd.depth = 0; new_prd.importance = 1.f; optix::Ray next_ray = optix::make_Ray(ray.origin, ray.direction, 0, scene_epsilon, RT_DEFAULT_MAX); rtTrace(top_opaque_object, next_ray, new_prd); if (!isInsideDomain(hit_point)) { prd.result = 0.f; return; } if (new_prd.distance > 0 && new_prd.distance < prd.stop_distance) { prd.result = 0.f; return; } float density = getIntensity(hit_point); float accumulative_density = 0; float accumulative_step = front_box_hit_point; do { accumulative_density += density * step; // hit_point += ray.direction * step; density = getIntensity(hit_point); accumulative_step += step; } while (density >= 0.f && accumulative_step < prd.stop_distance); float res = color_multiplier * accumulative_density;//exp(-density_multiplier * accumulative_density); if (res > 1.0f) { res = 1.0f; } else if (res < 0.f) { res = 0.f; } prd.result = /*getIntensity(ray.origin + prd.stop_distance * ray.direction) * */(1.0f - res); }
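The interesting part of closest_hit_radiance above is the fixed-step density accumulation; note that the line advancing hit_point is commented out in this version, so the loop re-samples the entry point while still advancing accumulative_step. The sketch below restates the marching loop that the step bookkeeping implies, stripped of the OptiX plumbing: it reuses getIntensity from this file, assumes the float3 operators from optixu_math_namespace, and its name and parameters are hypothetical.

// Sketch only: linear (clamped) attenuation along a ray, mirroring the
// color_multiplier * sum(density * step) term used above.
__device__ float march_attenuation(float3 origin, float3 dir,
                                   float t_enter, float t_stop,
                                   float step_len, float color_mult)
{
    float t = t_enter;
    float density = getIntensity(origin + t * dir);
    float accum = 0.f;
    while (density >= 0.f && t < t_stop) {   // getIntensity < 0 means the sample left the domain
        accum += density * step_len;
        t += step_len;
        density = getIntensity(origin + t * dir);
    }
    float res = color_mult * accum;
    res = fminf(fmaxf(res, 0.f), 1.f);
    return 1.f - res;                        // fraction of light transmitted
}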
fff10b54807c8f13bc43e9d5a4ebc34d1f1c16ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <sstream> #include <omp.h> #include <stdlib.h> #include <vector> #include <stdio.h> #include <string.h> #include <queue> #include <algorithm> #include <cassert> using namespace std; void usage() { cout << "USAGE: ./exec <filename> <deviceNum>" << endl; exit(0); } inline hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } return result; } void ConvertToCRS(int* & target_ind, int* & start_ind, const vector<vector<int> > &adj_list, int RS) { int init = 0; for(int ii = 0; ii < RS ; ii++) { start_ind[ii] = init; int siz = adj_list[ii].size(); int where = 0; for(int jj = init; jj < init + siz; jj++) target_ind[jj] = adj_list[ii][where++]; init += siz; } start_ind[RS] = init; } __global__ void INIT(int* target_ind, int* start_ind, int* results, unsigned int startNode, int* SIZE) { int tid = threadIdx.x; int v = results[startNode]; int st = start_ind[v]; results[target_ind[st+tid]] = 1; atomicSub(SIZE,1); } __global__ void Check_BFS(int* results, int RS, int* SIZE, int* switchCtr, unsigned int TOTAL, int v) { int index = blockDim.x * blockIdx.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == -1) { atomicAdd(SIZE,1); } else if(results[jj] == v) { atomicAdd(switchCtr,1); } } } __global__ void BFS_Top_Down(int* target_ind, int* start_ind, int* results, int v, int RS, unsigned int TOTAL, int* SIZE) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == v) { atomicAdd(SIZE, 1); int start_loc = start_ind[jj]; int end_loc = start_ind[jj + 1]; int curr = results[jj]; for(int ii = start_loc; ii < end_loc; ii++) { if(results[target_ind[ii]] == -1) { results[target_ind[ii]] = curr + 1; } } } } } __global__ void BFS_Bottom_Up(int* target_ind, int* start_ind, int* results, int v, unsigned int RS, unsigned int TOTAL, int* SIZE) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == -1) { int start_loc = start_ind[jj]; int end_loc = start_ind[jj + 1]; int target; bool raviolli = false; for(int ii = start_loc; ii < end_loc && raviolli == false; ii++) { target = results[target_ind[ii]]; if(target == v) { results[jj] = target + 1; atomicAdd(SIZE, 1); raviolli = true; } } } } } __global__ void BFS_Bottom_Up_Directed(int* target_rev, int* start_rev, int* results, int v, unsigned int RS, unsigned int TOTAL, int* SIZE) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == -1) { int start_loc = start_rev[jj]; int end_loc = start_rev[jj + 1]; int target; bool raviolli = false; for(int ii = start_loc; ii < end_loc && raviolli == false; ii++) { target = results[target_rev[ii]]; if(target == v) { results[jj] = target + 1; atomicAdd(SIZE, 1); raviolli = true; } } } } } void BFS(int* & target_ind, int* & start_ind, int* & results, unsigned int RS, unsigned int NNS, unsigned int startNode) { results[startNode] = 0; int v = 0; int *res; int *target_arr; int *start_arr; checkCuda(hipMalloc((void **)&res, sizeof(int)*RS)); checkCuda(hipMalloc((void **)&target_arr, sizeof(int)*NNS)); checkCuda(hipMalloc((void **)&start_arr, sizeof(int)*(RS+1))); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); int *SIZE; int switchCtr = RS/32; int *size = 
(int*)(malloc(sizeof(int))); int *zero = (int*)(malloc(sizeof(int))); unsigned int CHUNK_SIZE = 1024; unsigned int LIMIT = (RS+CHUNK_SIZE-1) / CHUNK_SIZE; unsigned int TOTAL = LIMIT*CHUNK_SIZE; dim3 GRID(LIMIT); dim3 BLOCK(CHUNK_SIZE); *size = 1; *zero = 0; //hipHostMalloc(&size,sizeof(int)); checkCuda(hipMalloc((void**)&SIZE,sizeof(int))); checkCuda(hipMemcpy(res, results, sizeof(int)*RS,hipMemcpyHostToDevice)); checkCuda(hipMemcpy(start_arr, start_ind, sizeof(int)*(RS+1),hipMemcpyHostToDevice)); checkCuda(hipMemcpy(target_arr, target_ind, sizeof(int)*NNS,hipMemcpyHostToDevice)); double start = omp_get_wtime(); while(*size > 0) { hipMemcpy(SIZE, zero, sizeof(int), hipMemcpyHostToDevice); if(*size < switchCtr) { hipLaunchKernelGGL(( BFS_Top_Down), dim3(GRID),dim3(BLOCK), 0, 0, target_arr, start_arr, res, v, RS, TOTAL, SIZE); hipDeviceSynchronize(); } else { hipLaunchKernelGGL(( BFS_Bottom_Up), dim3(GRID),dim3(BLOCK), 0, 0, target_arr, start_arr, res, v, RS, TOTAL, SIZE); hipDeviceSynchronize(); } //*switchCtr = 0; //Check_BFS<<<GRID,BLOCK>>>(res, RS, SIZE, SWITCH, TOTAL, v); //hipDeviceSynchronize(); hipMemcpy(size, SIZE, sizeof(int),hipMemcpyDeviceToHost); v++; } double end = omp_get_wtime(); cout << "\tSource:" << startNode << "\tTime:" << end - start << " s" << endl; checkCuda(hipMemcpy(results, res, sizeof(int)*RS,hipMemcpyDeviceToHost)); free(size); free(zero); hipFree(SIZE); hipFree(res); hipFree(target_arr); hipFree(start_arr); } void BFS_Directed(int* & target_ind, int* & start_ind, int* & target_rev, int* & start_rev, int* & results, unsigned int RS, unsigned int NNS, unsigned int NNSrev, unsigned int startNode) { results[startNode] = 0; int v = 0; int *res; int *target_rev_arr; int *start_rev_arr; int *target_arr; int *start_arr; checkCuda(hipMalloc((void **)&res, sizeof(int)*RS)); checkCuda(hipMalloc((void **)&target_arr, sizeof(int)*NNS)); checkCuda(hipMalloc((void **)&start_arr, sizeof(int)*(RS+1))); checkCuda(hipMalloc((void **)&target_rev_arr, sizeof(int)*NNSrev)); checkCuda(hipMalloc((void **)&start_rev_arr, sizeof(int)*(RS+1))); //printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError())); int *SIZE; int switchCtr = RS/32; int *size = (int*)(malloc(sizeof(int))); int *zero = (int*)(malloc(sizeof(int))); unsigned int CHUNK_SIZE = 1024; unsigned int LIMIT = (RS+CHUNK_SIZE-1) / CHUNK_SIZE; unsigned int TOTAL = LIMIT*CHUNK_SIZE; dim3 GRID(LIMIT); dim3 BLOCK(CHUNK_SIZE); *size = 1; *zero = 0; //hipHostMalloc(&size,sizeof(int)); checkCuda(hipMalloc((void**)&SIZE,sizeof(int))); checkCuda(hipMemcpy(res, results, sizeof(int)*RS,hipMemcpyHostToDevice)); checkCuda(hipMemcpy(start_arr, start_ind, sizeof(int)*(RS+1),hipMemcpyHostToDevice)); checkCuda(hipMemcpy(target_arr, target_ind, sizeof(int)*NNS,hipMemcpyHostToDevice)); checkCuda(hipMemcpy(start_rev_arr, start_rev, sizeof(int)*(RS+1),hipMemcpyHostToDevice)); checkCuda(hipMemcpy(target_rev_arr, target_rev, sizeof(int)*NNSrev,hipMemcpyHostToDevice)); double start = omp_get_wtime(); while(*size > 0) { hipMemcpy(SIZE, zero, sizeof(int), hipMemcpyHostToDevice); if(*size < switchCtr) { hipLaunchKernelGGL(( BFS_Top_Down), dim3(GRID),dim3(BLOCK), 0, 0, target_arr, start_arr, res, v, RS, TOTAL, SIZE); hipDeviceSynchronize(); } else { hipLaunchKernelGGL(( BFS_Bottom_Up_Directed), dim3(GRID),dim3(BLOCK), 0, 0, target_rev_arr, start_rev_arr, res, v, RS, TOTAL, SIZE); hipDeviceSynchronize(); } //*switchCtr = 0; //Check_BFS<<<GRID,BLOCK>>>(res, RS, SIZE, SWITCH, TOTAL, v); //hipDeviceSynchronize(); v++; hipMemcpy(size, SIZE, 
sizeof(int),hipMemcpyDeviceToHost); } double end = omp_get_wtime(); cout << "\tSource:" << startNode << "\tTime:" << end - start << " s" << endl; checkCuda(hipMemcpy(results, res, sizeof(int)*RS,hipMemcpyDeviceToHost)); free(size); free(zero); hipFree(SIZE); hipFree(res); hipFree(target_rev_arr); hipFree(start_rev_arr); hipFree(target_arr); hipFree(start_arr); } int main(int argc, const char** argv) { if(argc != 3) usage(); int devId = 0; devId = atoi(argv[2]); hipSetDevice(devId); string line; vector<vector<int> > adj_list; vector<vector<int> > rev_list; //See which node you are on by index+min const char* filename = argv[1]; ifstream input (filename); if(input.fail()) return 0; //Find first node unsigned int mini = 10000000; while(getline(input,line)) { stringstream sf(line); int temp; if(line.find('%') != string::npos) continue; else { while(sf >> temp) { if(temp < mini) mini = temp; } } } //Set to begin input.clear(); input.seekg(0, ios::beg); int ctr = 0; bool mode = false; unsigned int NNS, RS, NNSrev; while(getline(input,line)) { stringstream ss(line); int temp, temp2, temp3; if(line.find("%%") != string::npos && line.find(" symmetric") != string::npos) mode = true; else if(mode) { if(line.find('%') != string::npos) continue; //First line containing row number, column number and NNS else if(!ctr) { ss >> temp >> temp2 >> temp3; NNS = temp3; adj_list.resize(temp); RS = temp; ctr++; } //Lines containing start and end of an edge else if(ctr) { ss >> temp >> temp2; if(temp != temp2) { adj_list[temp - mini].push_back(temp2 - mini); adj_list[temp2 - mini].push_back(temp - mini); } } } //Get past comment lines else if(!mode) { if(line.find('%') != string::npos) continue; //First line containing row number, column number and NNS else if(!ctr) { ss >> temp >> temp2 >> temp3; NNS = temp3; adj_list.resize(temp); rev_list.resize(temp); RS = temp; ctr++; } //Lines containing start and end of an edge else if(ctr) { ss >> temp >> temp2; if(temp != temp2) { adj_list[temp - mini].push_back(temp2 - mini); rev_list[temp2 - mini].push_back(temp - mini); } } } } //Remove duplicates NNS = 0; NNSrev = 0; for(int i = 0; i < adj_list.size(); i++) { sort(adj_list[i].begin(), adj_list[i].end()); adj_list[i].erase(unique(adj_list[i].begin(), adj_list[i].end()), adj_list[i].end()); NNS += adj_list[i].size(); if(!mode) { sort(rev_list[i].begin(), rev_list[i].end()); rev_list[i].erase(unique(rev_list[i].begin(), rev_list[i].end()), rev_list[i].end()); NNSrev += rev_list[i].size(); } } int* target_ind = (int*)(malloc(sizeof(int)*NNS)); //hipHostMalloc(&target_ind, sizeof(int)*NNS); int* start_ind = (int*)(malloc(sizeof(int)*(RS+1))); //hipHostMalloc(&start_ind, sizeof(int)*(RS+1)); int* results = (int*)(malloc(sizeof(int)*RS)); //hipHostMalloc(&results, sizeof(int)*RS); memset(target_ind, 0, sizeof(int)* NNS); memset(start_ind, 0, sizeof(int)* (RS+1)); memset(results, -1, sizeof(int)* RS); ConvertToCRS(target_ind, start_ind, adj_list, RS); int* target_rev; int* start_rev; if(!mode) { target_rev = (int*)(malloc(sizeof(int)*NNSrev)); start_rev = (int*)(malloc(sizeof(int)*(RS+1))); memset(target_rev, 0, sizeof(int)* NNSrev); memset(start_rev, 0, sizeof(int)* (RS+1)); ConvertToCRS(target_rev, start_rev, rev_list, RS); } //Start Node as parameter cout << "Graph converted to 0-base(Initial node is 0)\n"; unsigned int startNode; //cout << "Please enter the start node: "; //cin >> startNode; //cout << endl; startNode = 0; if(mode) BFS(target_ind, start_ind, results, RS, NNS, startNode); else BFS_Directed(target_ind, 
start_ind, target_rev, start_rev, results, RS, NNS, NNSrev, startNode); ofstream myfile; myfile.open("hybridcudaresults.txt"); for(int i = 0; i < RS; i++) myfile<< results[i] <<"\n"; myfile.close(); hipDeviceSynchronize(); //hipHostFree(target_ind); //hipHostFree(start_ind); //hipHostFree(results); if(!mode) { free(target_rev); free(start_rev); } free(target_ind); free(results); free(start_ind); return 0; }
fff10b54807c8f13bc43e9d5a4ebc34d1f1c16ce.cu
#include <iostream> #include <fstream> #include <sstream> #include <omp.h> #include <stdlib.h> #include <vector> #include <stdio.h> #include <string.h> #include <queue> #include <algorithm> #include <cassert> using namespace std; void usage() { cout << "USAGE: ./exec <filename> <deviceNum>" << endl; exit(0); } inline cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } return result; } void ConvertToCRS(int* & target_ind, int* & start_ind, const vector<vector<int> > &adj_list, int RS) { int init = 0; for(int ii = 0; ii < RS ; ii++) { start_ind[ii] = init; int siz = adj_list[ii].size(); int where = 0; for(int jj = init; jj < init + siz; jj++) target_ind[jj] = adj_list[ii][where++]; init += siz; } start_ind[RS] = init; } __global__ void INIT(int* target_ind, int* start_ind, int* results, unsigned int startNode, int* SIZE) { int tid = threadIdx.x; int v = results[startNode]; int st = start_ind[v]; results[target_ind[st+tid]] = 1; atomicSub(SIZE,1); } __global__ void Check_BFS(int* results, int RS, int* SIZE, int* switchCtr, unsigned int TOTAL, int v) { int index = blockDim.x * blockIdx.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == -1) { atomicAdd(SIZE,1); } else if(results[jj] == v) { atomicAdd(switchCtr,1); } } } __global__ void BFS_Top_Down(int* target_ind, int* start_ind, int* results, int v, int RS, unsigned int TOTAL, int* SIZE) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == v) { atomicAdd(SIZE, 1); int start_loc = start_ind[jj]; int end_loc = start_ind[jj + 1]; int curr = results[jj]; for(int ii = start_loc; ii < end_loc; ii++) { if(results[target_ind[ii]] == -1) { results[target_ind[ii]] = curr + 1; } } } } } __global__ void BFS_Bottom_Up(int* target_ind, int* start_ind, int* results, int v, unsigned int RS, unsigned int TOTAL, int* SIZE) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == -1) { int start_loc = start_ind[jj]; int end_loc = start_ind[jj + 1]; int target; bool raviolli = false; for(int ii = start_loc; ii < end_loc && raviolli == false; ii++) { target = results[target_ind[ii]]; if(target == v) { results[jj] = target + 1; atomicAdd(SIZE, 1); raviolli = true; } } } } } __global__ void BFS_Bottom_Up_Directed(int* target_rev, int* start_rev, int* results, int v, unsigned int RS, unsigned int TOTAL, int* SIZE) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int jj = index; jj < RS; jj+=TOTAL) { if(results[jj] == -1) { int start_loc = start_rev[jj]; int end_loc = start_rev[jj + 1]; int target; bool raviolli = false; for(int ii = start_loc; ii < end_loc && raviolli == false; ii++) { target = results[target_rev[ii]]; if(target == v) { results[jj] = target + 1; atomicAdd(SIZE, 1); raviolli = true; } } } } } void BFS(int* & target_ind, int* & start_ind, int* & results, unsigned int RS, unsigned int NNS, unsigned int startNode) { results[startNode] = 0; int v = 0; int *res; int *target_arr; int *start_arr; checkCuda(cudaMalloc((void **)&res, sizeof(int)*RS)); checkCuda(cudaMalloc((void **)&target_arr, sizeof(int)*NNS)); checkCuda(cudaMalloc((void **)&start_arr, sizeof(int)*(RS+1))); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); int *SIZE; int switchCtr = RS/32; int *size = (int*)(malloc(sizeof(int))); int *zero = (int*)(malloc(sizeof(int))); unsigned int 
CHUNK_SIZE = 1024; unsigned int LIMIT = (RS+CHUNK_SIZE-1) / CHUNK_SIZE; unsigned int TOTAL = LIMIT*CHUNK_SIZE; dim3 GRID(LIMIT); dim3 BLOCK(CHUNK_SIZE); *size = 1; *zero = 0; //cudaMallocHost(&size,sizeof(int)); checkCuda(cudaMalloc((void**)&SIZE,sizeof(int))); checkCuda(cudaMemcpy(res, results, sizeof(int)*RS,cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(start_arr, start_ind, sizeof(int)*(RS+1),cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(target_arr, target_ind, sizeof(int)*NNS,cudaMemcpyHostToDevice)); double start = omp_get_wtime(); while(*size > 0) { cudaMemcpy(SIZE, zero, sizeof(int), cudaMemcpyHostToDevice); if(*size < switchCtr) { BFS_Top_Down<<<GRID,BLOCK>>>(target_arr, start_arr, res, v, RS, TOTAL, SIZE); cudaDeviceSynchronize(); } else { BFS_Bottom_Up<<<GRID,BLOCK>>>(target_arr, start_arr, res, v, RS, TOTAL, SIZE); cudaDeviceSynchronize(); } //*switchCtr = 0; //Check_BFS<<<GRID,BLOCK>>>(res, RS, SIZE, SWITCH, TOTAL, v); //cudaDeviceSynchronize(); cudaMemcpy(size, SIZE, sizeof(int),cudaMemcpyDeviceToHost); v++; } double end = omp_get_wtime(); cout << "\tSource:" << startNode << "\tTime:" << end - start << " s" << endl; checkCuda(cudaMemcpy(results, res, sizeof(int)*RS,cudaMemcpyDeviceToHost)); free(size); free(zero); cudaFree(SIZE); cudaFree(res); cudaFree(target_arr); cudaFree(start_arr); } void BFS_Directed(int* & target_ind, int* & start_ind, int* & target_rev, int* & start_rev, int* & results, unsigned int RS, unsigned int NNS, unsigned int NNSrev, unsigned int startNode) { results[startNode] = 0; int v = 0; int *res; int *target_rev_arr; int *start_rev_arr; int *target_arr; int *start_arr; checkCuda(cudaMalloc((void **)&res, sizeof(int)*RS)); checkCuda(cudaMalloc((void **)&target_arr, sizeof(int)*NNS)); checkCuda(cudaMalloc((void **)&start_arr, sizeof(int)*(RS+1))); checkCuda(cudaMalloc((void **)&target_rev_arr, sizeof(int)*NNSrev)); checkCuda(cudaMalloc((void **)&start_rev_arr, sizeof(int)*(RS+1))); //printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError())); int *SIZE; int switchCtr = RS/32; int *size = (int*)(malloc(sizeof(int))); int *zero = (int*)(malloc(sizeof(int))); unsigned int CHUNK_SIZE = 1024; unsigned int LIMIT = (RS+CHUNK_SIZE-1) / CHUNK_SIZE; unsigned int TOTAL = LIMIT*CHUNK_SIZE; dim3 GRID(LIMIT); dim3 BLOCK(CHUNK_SIZE); *size = 1; *zero = 0; //cudaMallocHost(&size,sizeof(int)); checkCuda(cudaMalloc((void**)&SIZE,sizeof(int))); checkCuda(cudaMemcpy(res, results, sizeof(int)*RS,cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(start_arr, start_ind, sizeof(int)*(RS+1),cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(target_arr, target_ind, sizeof(int)*NNS,cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(start_rev_arr, start_rev, sizeof(int)*(RS+1),cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(target_rev_arr, target_rev, sizeof(int)*NNSrev,cudaMemcpyHostToDevice)); double start = omp_get_wtime(); while(*size > 0) { cudaMemcpy(SIZE, zero, sizeof(int), cudaMemcpyHostToDevice); if(*size < switchCtr) { BFS_Top_Down<<<GRID,BLOCK>>>(target_arr, start_arr, res, v, RS, TOTAL, SIZE); cudaDeviceSynchronize(); } else { BFS_Bottom_Up_Directed<<<GRID,BLOCK>>>(target_rev_arr, start_rev_arr, res, v, RS, TOTAL, SIZE); cudaDeviceSynchronize(); } //*switchCtr = 0; //Check_BFS<<<GRID,BLOCK>>>(res, RS, SIZE, SWITCH, TOTAL, v); //cudaDeviceSynchronize(); v++; cudaMemcpy(size, SIZE, sizeof(int),cudaMemcpyDeviceToHost); } double end = omp_get_wtime(); cout << "\tSource:" << startNode << "\tTime:" << end - start << " s" << endl; checkCuda(cudaMemcpy(results, res, 
sizeof(int)*RS,cudaMemcpyDeviceToHost)); free(size); free(zero); cudaFree(SIZE); cudaFree(res); cudaFree(target_rev_arr); cudaFree(start_rev_arr); cudaFree(target_arr); cudaFree(start_arr); } int main(int argc, const char** argv) { if(argc != 3) usage(); int devId = 0; devId = atoi(argv[2]); cudaSetDevice(devId); string line; vector<vector<int> > adj_list; vector<vector<int> > rev_list; //See which node you are on by index+min const char* filename = argv[1]; ifstream input (filename); if(input.fail()) return 0; //Find first node unsigned int mini = 10000000; while(getline(input,line)) { stringstream sf(line); int temp; if(line.find('%') != string::npos) continue; else { while(sf >> temp) { if(temp < mini) mini = temp; } } } //Set to begin input.clear(); input.seekg(0, ios::beg); int ctr = 0; bool mode = false; unsigned int NNS, RS, NNSrev; while(getline(input,line)) { stringstream ss(line); int temp, temp2, temp3; if(line.find("%%") != string::npos && line.find(" symmetric") != string::npos) mode = true; else if(mode) { if(line.find('%') != string::npos) continue; //First line containing row number, column number and NNS else if(!ctr) { ss >> temp >> temp2 >> temp3; NNS = temp3; adj_list.resize(temp); RS = temp; ctr++; } //Lines containing start and end of an edge else if(ctr) { ss >> temp >> temp2; if(temp != temp2) { adj_list[temp - mini].push_back(temp2 - mini); adj_list[temp2 - mini].push_back(temp - mini); } } } //Get past comment lines else if(!mode) { if(line.find('%') != string::npos) continue; //First line containing row number, column number and NNS else if(!ctr) { ss >> temp >> temp2 >> temp3; NNS = temp3; adj_list.resize(temp); rev_list.resize(temp); RS = temp; ctr++; } //Lines containing start and end of an edge else if(ctr) { ss >> temp >> temp2; if(temp != temp2) { adj_list[temp - mini].push_back(temp2 - mini); rev_list[temp2 - mini].push_back(temp - mini); } } } } //Remove duplicates NNS = 0; NNSrev = 0; for(int i = 0; i < adj_list.size(); i++) { sort(adj_list[i].begin(), adj_list[i].end()); adj_list[i].erase(unique(adj_list[i].begin(), adj_list[i].end()), adj_list[i].end()); NNS += adj_list[i].size(); if(!mode) { sort(rev_list[i].begin(), rev_list[i].end()); rev_list[i].erase(unique(rev_list[i].begin(), rev_list[i].end()), rev_list[i].end()); NNSrev += rev_list[i].size(); } } int* target_ind = (int*)(malloc(sizeof(int)*NNS)); //cudaMallocHost(&target_ind, sizeof(int)*NNS); int* start_ind = (int*)(malloc(sizeof(int)*(RS+1))); //cudaMallocHost(&start_ind, sizeof(int)*(RS+1)); int* results = (int*)(malloc(sizeof(int)*RS)); //cudaMallocHost(&results, sizeof(int)*RS); memset(target_ind, 0, sizeof(int)* NNS); memset(start_ind, 0, sizeof(int)* (RS+1)); memset(results, -1, sizeof(int)* RS); ConvertToCRS(target_ind, start_ind, adj_list, RS); int* target_rev; int* start_rev; if(!mode) { target_rev = (int*)(malloc(sizeof(int)*NNSrev)); start_rev = (int*)(malloc(sizeof(int)*(RS+1))); memset(target_rev, 0, sizeof(int)* NNSrev); memset(start_rev, 0, sizeof(int)* (RS+1)); ConvertToCRS(target_rev, start_rev, rev_list, RS); } //Start Node as parameter cout << "Graph converted to 0-base(Initial node is 0)\n"; unsigned int startNode; //cout << "Please enter the start node: "; //cin >> startNode; //cout << endl; startNode = 0; if(mode) BFS(target_ind, start_ind, results, RS, NNS, startNode); else BFS_Directed(target_ind, start_ind, target_rev, start_rev, results, RS, NNS, NNSrev, startNode); ofstream myfile; myfile.open("hybridcudaresults.txt"); for(int i = 0; i < RS; i++) myfile<< results[i] 
<<"\n"; myfile.close(); cudaDeviceSynchronize(); //cudaFreeHost(target_ind); //cudaFreeHost(start_ind); //cudaFreeHost(results); if(!mode) { free(target_rev); free(start_rev); } free(target_ind); free(results); free(start_ind); return 0; }
2c4a0019123d917eb3a10e83bf149a0325ed701c.hip
// !!! This is a file automatically generated by hipify!!! // // UnaryExecution.cpp // MNN // // Created by MNN on 2019/02/28. // Copyright 2018, Alibaba Group Holding Limited // #include "UnaryExecution.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" #include "Raster.cuh" #include "backend/cuda/core/CUDABackend.hpp" #include <hip/hip_runtime.h> namespace MNN { namespace CUDA { void callUnary(void *input, void *output, size_t count, MNN::CUDARuntime* runtime, halide_type_t data_type, MNN::UnaryOpOperation op_type) { Tensor::InsideDescribe::Region reg; reg.size[2] = count; UnaryBlit((uint8_t*)output, (const uint8_t*)input, reg.size, reg.src.stride, reg.dst.stride, data_type.bytes(), runtime, op_type); return; } UnaryExecution::UnaryExecution(UnaryOpOperation opType, Backend* backend) : Execution(backend) { auto cudaBackend = static_cast<CUDABackend*>(backend); mRuntime = cudaBackend->getCUDARuntime(); mOpType = opType; } ErrorCode UnaryExecution::onResize(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) { auto shape = inputs[0]->shape(); mCount = CUDABackend::realSize(inputs[0]); return NO_ERROR; } ErrorCode UnaryExecution::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) { #ifdef LOG_VERBOSE MNN_PRINT("start UnaryExecution onExecute..."); #endif auto type = inputs[0]->getType(); if (static_cast<CUDABackend*>(backend())->useFp16()) { type.bits = 16; } //MNN_PRINT("unary size:%d\n", mCount); callUnary((void*)inputs[0]->deviceId(), (void*)outputs[0]->deviceId(), mCount, mRuntime, type, mOpType); #ifdef LOG_VERBOSE MNN_PRINT("end UnaryExecution onExecute..."); #endif return NO_ERROR; } __global__ void RELU(const float *input, float *output, size_t count, float slope) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float x = input[i]; float y = x > 0 ? x : x * slope; output[i] = y; } return; } __global__ void RELU_Half(const half *input, half *output, size_t count, float slope) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float x = input[i]; float y = x > 0 ? x : x * slope; output[i] = (half)y; } return; } __global__ void RELU_INT8(const int8_t *input, int8_t *output, size_t count, int8_t zeroPoint) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { int8_t x = input[i]; int8_t y = x > zeroPoint ? 
x : zeroPoint; output[i] = y; } return; } class ReluExecution : public Execution { public: ReluExecution(Backend* bn, float slope) : Execution(bn) { mSlope = slope; } virtual ~ReluExecution() = default; ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto count = CUDABackend::realSize(inputs[0]); int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); auto input = inputs[0]->deviceId(); auto output = outputs[0]->deviceId(); if (TensorUtils::getDescribe(outputs[0])->quantAttr != nullptr && TensorUtils::getDescribe(outputs[0])->type == DataType_DT_INT8) { auto inInfo = TensorUtils::getQuantInfo(inputs[0]); auto outInfo = TensorUtils::getQuantInfo(outputs[0]); if (inInfo != outInfo) { MNN_PRINT("this relu int8 implementation has error when input output quant info mismatch\n"); } if(mSlope > 0.0f || mSlope < 0.0f) { MNN_PRINT("Warning, CUDA only support Relu int8, PReLU int8 not support yet!\n"); } int8_t zeroPoint = int8_t(outInfo[1]); hipLaunchKernelGGL(( RELU_INT8), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output, count, zeroPoint); checkKernelErrors; return NO_ERROR; } if (static_cast<CUDABackend*>(backend())->useFp16()) { hipLaunchKernelGGL(( RELU_Half), dim3(block_num), dim3(threads_num), 0, 0, (half*)input, (half*)output, count, mSlope); checkKernelErrors; } else { hipLaunchKernelGGL(( RELU), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (float*)output, count, mSlope); checkKernelErrors; } return NO_ERROR; } private: float mSlope; }; template<typename T> __global__ void CLAMP(const T *input, T *output, size_t count, float minV, float maxV) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float x = input[i]; float y = min(max(x, minV), maxV); output[i] = y; } return; } class Relu6Execution : public Execution { public: Relu6Execution(Backend* bn, float minV, float maxV) : Execution(bn) { mMinV = minV; mMaxV = maxV; } virtual ~Relu6Execution() = default; ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto count = CUDABackend::realSize(inputs[0]); int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); auto input = inputs[0]->deviceId(); auto output = outputs[0]->deviceId(); if (static_cast<CUDABackend*>(backend())->useFp16()) { hipLaunchKernelGGL(( CLAMP), dim3(block_num), dim3(threads_num), 0, 0, (half*)input, (half*)output, count, mMinV, mMaxV); } else { hipLaunchKernelGGL(( CLAMP), dim3(block_num), dim3(threads_num), 0, 0, (float*)input, (float*)output, count, mMinV, mMaxV); } return NO_ERROR; } private: float mMinV; float mMaxV; }; class UnaryCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { if (op->type() == OpType_UnaryOp) { return new UnaryExecution(op->main_as_UnaryOp()->opType(), backend); } if (op->type() == OpType_Sigmoid) { return new UnaryExecution(UnaryOpOperation_SIGMOID, backend); } if (op->type() == OpType_TanH) { return new UnaryExecution(UnaryOpOperation_TANH, backend); } if (op->type() == OpType_ReLU) { float slope = 0.0f; if (nullptr != op->main_as_Relu()) { slope = op->main_as_Relu()->slope(); } return new 
ReluExecution(backend, slope); } if (op->type() == OpType_ReLU6) { float minV = 0.0f; float maxV = 6.0f; if (nullptr != op->main()) { auto p = op->main_as_Relu6(); minV = p->minValue(); maxV = p->maxValue(); } return new Relu6Execution(backend, minV, maxV); } return nullptr; } }; CUDACreatorRegister<UnaryCreator> __UnaryExecution(OpType_UnaryOp); CUDACreatorRegister<UnaryCreator> __SigmoidExecution(OpType_Sigmoid); CUDACreatorRegister<UnaryCreator> __TanhExecution(OpType_TanH); CUDACreatorRegister<UnaryCreator> __ReluExecution(OpType_ReLU); CUDACreatorRegister<UnaryCreator> __Relu6Execution(OpType_ReLU6); } // namespace CUDA } // namespace MNN
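The .hip file above is what hipify produces from the CUDA source that follows: triple-chevron launches such as RELU<<<block_num, threads_num>>>(...) become hipLaunchKernelGGL calls with the same grid and block arguments plus explicit shared-memory and stream slots, and cuda_runtime.h becomes hip/hip_runtime.h. A minimal standalone illustration of that mapping, using a made-up kernel rather than MNN code:

#include <hip/hip_runtime.h>

// Same grid-stride loop shape as RELU / CLAMP above, applied to a dummy operation.
__global__ void scale(float* data, size_t count, float factor) {
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count;
         i += blockDim.x * gridDim.x)
        data[i] *= factor;
}

static void launch_scale(float* d_data, size_t count) {
    // CUDA form:          scale<<<256, 256>>>(d_data, count, 2.0f);
    // hipify'd HIP form:  kernel, grid, block, dynamic shared mem, stream, then the args.
    hipLaunchKernelGGL(scale, dim3(256), dim3(256), 0, 0, d_data, count, 2.0f);
}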
2c4a0019123d917eb3a10e83bf149a0325ed701c.cu
// // UnaryExecution.cpp // MNN // // Created by MNN on 2019/02/28. // Copyright © 2018, Alibaba Group Holding Limited // #include "UnaryExecution.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" #include "Raster.cuh" #include "backend/cuda/core/CUDABackend.hpp" #include <cuda_runtime.h> namespace MNN { namespace CUDA { void callUnary(void *input, void *output, size_t count, MNN::CUDARuntime* runtime, halide_type_t data_type, MNN::UnaryOpOperation op_type) { Tensor::InsideDescribe::Region reg; reg.size[2] = count; UnaryBlit((uint8_t*)output, (const uint8_t*)input, reg.size, reg.src.stride, reg.dst.stride, data_type.bytes(), runtime, op_type); return; } UnaryExecution::UnaryExecution(UnaryOpOperation opType, Backend* backend) : Execution(backend) { auto cudaBackend = static_cast<CUDABackend*>(backend); mRuntime = cudaBackend->getCUDARuntime(); mOpType = opType; } ErrorCode UnaryExecution::onResize(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) { auto shape = inputs[0]->shape(); mCount = CUDABackend::realSize(inputs[0]); return NO_ERROR; } ErrorCode UnaryExecution::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) { #ifdef LOG_VERBOSE MNN_PRINT("start UnaryExecution onExecute..."); #endif auto type = inputs[0]->getType(); if (static_cast<CUDABackend*>(backend())->useFp16()) { type.bits = 16; } //MNN_PRINT("unary size:%d\n", mCount); callUnary((void*)inputs[0]->deviceId(), (void*)outputs[0]->deviceId(), mCount, mRuntime, type, mOpType); #ifdef LOG_VERBOSE MNN_PRINT("end UnaryExecution onExecute..."); #endif return NO_ERROR; } __global__ void RELU(const float *input, float *output, size_t count, float slope) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float x = input[i]; float y = x > 0 ? x : x * slope; output[i] = y; } return; } __global__ void RELU_Half(const half *input, half *output, size_t count, float slope) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float x = input[i]; float y = x > 0 ? x : x * slope; output[i] = (half)y; } return; } __global__ void RELU_INT8(const int8_t *input, int8_t *output, size_t count, int8_t zeroPoint) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { int8_t x = input[i]; int8_t y = x > zeroPoint ? 
x : zeroPoint; output[i] = y; } return; } class ReluExecution : public Execution { public: ReluExecution(Backend* bn, float slope) : Execution(bn) { mSlope = slope; } virtual ~ReluExecution() = default; ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto count = CUDABackend::realSize(inputs[0]); int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); auto input = inputs[0]->deviceId(); auto output = outputs[0]->deviceId(); if (TensorUtils::getDescribe(outputs[0])->quantAttr != nullptr && TensorUtils::getDescribe(outputs[0])->type == DataType_DT_INT8) { auto inInfo = TensorUtils::getQuantInfo(inputs[0]); auto outInfo = TensorUtils::getQuantInfo(outputs[0]); if (inInfo != outInfo) { MNN_PRINT("this relu int8 implementation has error when input output quant info mismatch\n"); } if(mSlope > 0.0f || mSlope < 0.0f) { MNN_PRINT("Warning, CUDA only support Relu int8, PReLU int8 not support yet!\n"); } int8_t zeroPoint = int8_t(outInfo[1]); RELU_INT8<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output, count, zeroPoint); checkKernelErrors; return NO_ERROR; } if (static_cast<CUDABackend*>(backend())->useFp16()) { RELU_Half<<<block_num, threads_num>>>((half*)input, (half*)output, count, mSlope); checkKernelErrors; } else { RELU<<<block_num, threads_num>>>((float*)input, (float*)output, count, mSlope); checkKernelErrors; } return NO_ERROR; } private: float mSlope; }; template<typename T> __global__ void CLAMP(const T *input, T *output, size_t count, float minV, float maxV) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float x = input[i]; float y = min(max(x, minV), maxV); output[i] = y; } return; } class Relu6Execution : public Execution { public: Relu6Execution(Backend* bn, float minV, float maxV) : Execution(bn) { mMinV = minV; mMaxV = maxV; } virtual ~Relu6Execution() = default; ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto count = CUDABackend::realSize(inputs[0]); int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); auto input = inputs[0]->deviceId(); auto output = outputs[0]->deviceId(); if (static_cast<CUDABackend*>(backend())->useFp16()) { CLAMP<<<block_num, threads_num>>>((half*)input, (half*)output, count, mMinV, mMaxV); } else { CLAMP<<<block_num, threads_num>>>((float*)input, (float*)output, count, mMinV, mMaxV); } return NO_ERROR; } private: float mMinV; float mMaxV; }; class UnaryCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { if (op->type() == OpType_UnaryOp) { return new UnaryExecution(op->main_as_UnaryOp()->opType(), backend); } if (op->type() == OpType_Sigmoid) { return new UnaryExecution(UnaryOpOperation_SIGMOID, backend); } if (op->type() == OpType_TanH) { return new UnaryExecution(UnaryOpOperation_TANH, backend); } if (op->type() == OpType_ReLU) { float slope = 0.0f; if (nullptr != op->main_as_Relu()) { slope = op->main_as_Relu()->slope(); } return new ReluExecution(backend, slope); } if (op->type() == OpType_ReLU6) { float minV = 0.0f; float maxV = 6.0f; if (nullptr != op->main()) { auto p = op->main_as_Relu6(); minV = p->minValue(); maxV = 
p->maxValue(); } return new Relu6Execution(backend, minV, maxV); } return nullptr; } }; CUDACreatorRegister<UnaryCreator> __UnaryExecution(OpType_UnaryOp); CUDACreatorRegister<UnaryCreator> __SigmoidExecution(OpType_Sigmoid); CUDACreatorRegister<UnaryCreator> __TanhExecution(OpType_TanH); CUDACreatorRegister<UnaryCreator> __ReluExecution(OpType_ReLU); CUDACreatorRegister<UnaryCreator> __Relu6Execution(OpType_ReLU6); } // namespace CUDA } // namespace MNN
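ReluExecution above treats the int8 path as a clamp at the output zero point and only warns when the input and output quantization parameters differ. That is the expected reduction for an affine quantizer: with real = scale * (q - zeroPoint), real > 0 exactly when q > zeroPoint, so max(real, 0) becomes max(q, zeroPoint) whenever input and output share scale and zero point. A tiny host-side sketch of that identity (not MNN code):

#include <cstdint>
#include <algorithm>

// Same effect as the RELU_INT8 kernel's  x > zeroPoint ? x : zeroPoint.
static inline int8_t relu_int8(int8_t q, int8_t zeroPoint) {
    return std::max(q, zeroPoint);
}

// Example: scale = 0.1, zeroPoint = -2. Then q = -5 represents -0.3, which ReLU maps
// to 0.0, i.e. back to q = -2; relu_int8(-5, -2) == -2 as expected.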
cc108c6051b774013946e0535deac7f1c47691b6.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_sssp.cu * * @brief Simple test driver program for single source shorest path. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <iostream> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> #include <gunrock/graphio/rmat.cuh> #include <gunrock/graphio/rgg.cuh> // SSSP includes #include <gunrock/app/sssp/sssp_enactor.cuh> #include <gunrock/app/sssp/sssp_problem.cuh> #include <gunrock/app/sssp/sssp_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <gunrock/priority_queue/kernel.cuh> #include <moderngpu.cuh> // Boost includes for CPU dijkstra SSSP reference algorithms #include <boost/config.hpp> #include <boost/graph/graph_traits.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/dijkstra_shortest_paths.hpp> #include <boost/property_map/property_map.hpp> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::sssp; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( " test_sssp <graph type> <graph type args> [--device=<device_index>]\n" " [--undirected] [--instrumented] [--src=<source index>] [--quick=<0|1>]\n" " [--mark-pred] [--queue-sizing=<scale factor>] [--traversal-mode=<0|1>]\n" " [--in-sizing=<in/out queue scale factor>] [--disable-size-check]\n" " [--grid-size=<grid size>] [partition_method=<random|biasrandom|clustered|metis>]\n" " [--v] [--iteration-num=<num>]\n" "\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n" " edges from stdin (or from the optionally-specified file).\n" " --device=<device_index> Set GPU device for running the test. 
[Default: 0].\n" " --undirected Treat the graph as undirected (symmetric).\n" " --instrumented Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty\n" " (a relative indicator of load imbalance.)\n" " --src=<source vertex id> Begins SSSP from the source [Default: 0].\n" " If randomize: from a random source vertex.\n" " If largestdegree: from largest degree vertex.\n" " --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n" " --mark-pred Keep both label info and predecessor info.\n" " --queue-sizing=<factor> Allocates a frontier queue sized at:\n" " (graph-edges * <scale factor>) [Default: 1.0].\n" " --v Print verbose per iteration debug info.\n" " --iteration-num=<number> Number of runs to perform the test [Default: 1].\n" " --traversal-mode=<0 or 1> Set traversal strategy, 0 for Load-Balanced,\n" " 1 for Dynamic-Cooperative [Default: dynamic\n" " determine based on average degree].\n" ); } /** * @brief Displays the SSSP result (i.e., distance from source) * * @tparam VertexId * @tparam SizeT * * @param[in] source_path Search depth from the source for each node. * @param[in] num_nodes Number of nodes in the graph. */ template<typename VertexId, typename SizeT> void DisplaySolution (VertexId *source_path, SizeT num_nodes) { if (num_nodes > 40) num_nodes = 40; printf("["); for (VertexId i = 0; i < num_nodes; ++i) { PrintValue(i); printf(":"); PrintValue(source_path[i]); printf(" "); } printf("]\n"); } /** * Performance/Evaluation statistics */ struct Stats { const char *name; Statistic rate; Statistic search_depth; Statistic redundant_work; Statistic duty; Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {} Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {} }; /** * @brief Test_Parameter structure */ struct Test_Parameter : gunrock::app::TestParameter_Base { public: //bool mark_predecessors ;// Mark src-distance vs. 
parent vertices int delta_factor; double max_queue_sizing1; Test_Parameter() { delta_factor = 16; mark_predecessors = false; max_queue_sizing1 = -1.0; } ~Test_Parameter() { } void Init(CommandLineArgs &args) { TestParameter_Base::Init(args); mark_predecessors = args.CheckCmdLineFlag("mark-pred"); args.GetCmdLineArgument("delta-factor" , delta_factor ); args.GetCmdLineArgument("queue-sizing1", max_queue_sizing1); } }; /** * @brief Displays timing and correctness statistics * * @tparam MARK_PREDECESSORS * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] stats Reference to the Stats object defined in RunTests * @param[in] src Source node where SSSP starts * @param[in] h_labels Host-side vector stores computed labels for validation * @param[in] graph Reference to the CSR graph we process on * @param[in] elapsed Total elapsed kernel running time * @param[in] search_depth Maximum search depth of the SSSP algorithm * @param[in] total_queued Total element queued in SSSP kernel running process * @param[in] avg_duty Average duty of the SSSP kernels */ template< typename VertexId, typename Value, typename SizeT> void DisplayStats( Stats &stats, VertexId src, Value *h_labels, const Csr<VertexId, Value, SizeT> &graph, double elapsed, VertexId search_depth, long long total_queued, double avg_duty) { // Compute nodes and edges visited SizeT edges_visited = 0; SizeT nodes_visited = 0; for (VertexId i = 0; i < graph.nodes; ++i) { if (h_labels[i] < util::MaxValue<VertexId>()) { ++nodes_visited; edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i]; } } double redundant_work = 0.0; if (total_queued > 0) { redundant_work = ((double) total_queued - edges_visited) / edges_visited; } redundant_work *= 100; // Display test name printf("[%s] finished.", stats.name); // Display statistics if (nodes_visited < 5) { printf("Fewer than 5 vertices visited.\n"); } else { // Display the specific sample statistics double m_teps = (double) edges_visited / (elapsed * 1000.0); printf("\n elapsed: %.4f ms, rate: %.4f MiEdges/s", elapsed, m_teps); if (search_depth != 0) printf(", search_depth: %lld", (long long) search_depth); printf("\n src: %lld, nodes_visited: %lld, edges_visited: %lld", (long long) src, (long long) nodes_visited, (long long) edges_visited); if (avg_duty != 0) { printf("\n avg CTA duty: %.2f%%", avg_duty * 100); } if (total_queued > 0) { printf(", total queued: %lld", total_queued); } if (redundant_work > 0) { printf(", redundant work: %.2f%%", redundant_work); } printf("\n"); } } /****************************************************************************** * SSSP Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference SSSP ranking implementation. 
* * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_values Host-side vector to store CPU computed labels for each node * @param[in] node_preds Host-side vector to store CPU computed predecessors for each node * @param[in] src Source node where SSSP starts */ template< typename VertexId, typename Value, typename SizeT, bool MARK_PREDECESSORS> void SimpleReferenceSssp( const Csr<VertexId, Value, SizeT> &graph, Value *node_values, VertexId *node_preds, VertexId src) { using namespace boost; // Prepare Boost Datatype and Data structure typedef adjacency_list<vecS, vecS, directedS, no_property, property <edge_weight_t, unsigned int> > Graph; typedef graph_traits<Graph>::vertex_descriptor vertex_descriptor; typedef graph_traits<Graph>::edge_descriptor edge_descriptor; typedef std::pair<VertexId, VertexId> Edge; Edge *edges = ( Edge*)malloc(sizeof( Edge)*graph.edges); Value *weight = (Value*)malloc(sizeof(Value)*graph.edges); for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j) { edges[j] = Edge(i, graph.column_indices[j]); weight[j] = graph.edge_values[j]; } } Graph g(edges, edges + graph.edges, weight, graph.nodes); std::vector<Value> d(graph.nodes); std::vector<vertex_descriptor> p(graph.nodes); vertex_descriptor s = vertex(src, g); property_map<Graph, vertex_index_t>::type indexmap = get(vertex_index, g); // // Perform SSSP // CpuTimer cpu_timer; cpu_timer.Start(); if (MARK_PREDECESSORS) { dijkstra_shortest_paths(g, s, predecessor_map(boost::make_iterator_property_map( p.begin(), get(boost::vertex_index, g))).distance_map( boost::make_iterator_property_map( d.begin(), get(boost::vertex_index, g)))); } else { dijkstra_shortest_paths(g, s, distance_map(boost::make_iterator_property_map( d.begin(), get(boost::vertex_index, g)))); } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); printf("CPU SSSP finished in %lf msec.\n", elapsed); Coo<Value, Value>* sort_dist = NULL; Coo<VertexId, VertexId>* sort_pred = NULL; sort_dist = (Coo<Value, Value>*)malloc( sizeof(Coo<Value, Value>) * graph.nodes); if (MARK_PREDECESSORS) { sort_pred = (Coo<VertexId, VertexId>*)malloc( sizeof(Coo<VertexId, VertexId>) * graph.nodes); } graph_traits < Graph >::vertex_iterator vi, vend; for (tie(vi, vend) = vertices(g); vi != vend; ++vi) { sort_dist[(*vi)].row = (*vi); sort_dist[(*vi)].col = d[(*vi)]; } std::stable_sort( sort_dist, sort_dist + graph.nodes, RowFirstTupleCompare<Coo<Value, Value> >); if (MARK_PREDECESSORS) { for (tie(vi, vend) = vertices(g); vi != vend; ++vi) { sort_pred[(*vi)].row = (*vi); sort_pred[(*vi)].col = p[(*vi)]; } std::stable_sort( sort_pred, sort_pred + graph.nodes, RowFirstTupleCompare< Coo<VertexId, VertexId> >); } for (int i = 0; i < graph.nodes; ++i) { node_values[i] = sort_dist[i].col; } if (MARK_PREDECESSORS) { for (int i = 0; i < graph.nodes; ++i) { node_preds[i] = sort_pred[i].col; } } if (sort_dist) free(sort_dist); if (sort_pred) free(sort_pred); } /** * @brief Run SSSP tests * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam MARK_PREDECESSORS * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK, bool MARK_PREDECESSORS> void RunTests(Test_Parameter *parameter) { typedef SSSPProblem< VertexId, SizeT, Value, MARK_PREDECESSORS> Problem; typedef SSSPEnactor< Problem, 
INSTRUMENT, DEBUG, SIZE_CHECK> Enactor; Csr<VertexId, Value, SizeT> *graph = (Csr<VertexId, Value, SizeT>*)parameter->graph; VertexId src = (VertexId)parameter -> src; int max_grid_size = parameter -> max_grid_size; int num_gpus = parameter -> num_gpus; double max_queue_sizing = parameter -> max_queue_sizing; double max_in_sizing = parameter -> max_in_sizing; ContextPtr *context = (ContextPtr*)parameter -> context; std::string partition_method = parameter -> partition_method; int *gpu_idx = parameter -> gpu_idx; hipStream_t *streams = parameter -> streams; float partition_factor = parameter -> partition_factor; int partition_seed = parameter -> partition_seed; bool g_quick = parameter -> g_quick; bool g_stream_from_host = parameter -> g_stream_from_host; int delta_factor = parameter -> delta_factor; int iterations = parameter -> iterations; int traversal_mode = parameter -> traversal_mode; size_t *org_size = new size_t[num_gpus]; // Allocate host-side label array (for both reference and gpu-computed results) Value *reference_labels = new Value[graph->nodes]; Value *h_labels = new Value[graph->nodes]; Value *reference_check_label = (g_quick) ? NULL : reference_labels; VertexId *reference_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL; VertexId *h_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL; VertexId *reference_check_pred = (g_quick || !MARK_PREDECESSORS) ? NULL : reference_preds; for (int gpu=0;gpu<num_gpus;gpu++) { size_t dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&(org_size[gpu]),&dummy); } // Allocate SSSP enactor map Enactor* enactor = new Enactor(num_gpus, gpu_idx); // Allocate problem on GPU Problem *problem = new Problem; util::GRError(problem->Init( g_stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, delta_factor, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "Problem SSSP Initialization Failed", __FILE__, __LINE__); util::GRError(enactor->Init (context, problem, max_grid_size, traversal_mode), "SSSP Enactor init failed", __FILE__, __LINE__); // // Compute reference CPU SSSP solution for source-distance // if (reference_check_label != NULL) { printf("Computing reference value ...\n"); SimpleReferenceSssp<VertexId, Value, SizeT, MARK_PREDECESSORS>( *graph, reference_check_label, reference_check_pred, src); printf("\n"); } Stats *stats = new Stats("GPU SSSP"); long long total_queued = 0; VertexId search_depth = 0; double avg_duty = 0.0; float elapsed = 0.0f; // Perform SSSP CpuTimer cpu_timer; for (int iter = 0; iter < iterations; ++iter) { util::GRError(problem->Reset(src, enactor->GetFrontierType(), max_queue_sizing), "SSSP Problem Data Reset Failed", __FILE__, __LINE__); util::GRError(enactor->Reset(), "SSSP Enactor Reset failed", __FILE__, __LINE__); printf("__________________________\n");fflush(stdout); cpu_timer.Start(); util::GRError(enactor->Enact(src, traversal_mode), "SSSP Problem Enact Failed", __FILE__, __LINE__); cpu_timer.Stop(); printf("--------------------------\n");fflush(stdout); elapsed += cpu_timer.ElapsedMillis(); } elapsed /= iterations; enactor->GetStatistics(total_queued, search_depth, avg_duty); // Copy out results util::GRError(problem->Extract(h_labels, h_preds), "SSSP Problem Data Extraction Failed", __FILE__, __LINE__); for (SizeT i=0; i<graph->nodes;i++) if (reference_check_label[i]==-1) reference_check_label[i]=util::MaxValue<Value>(); // Display Solution printf("\nFirst 40 labels of the GPU result.\n"); DisplaySolution(h_labels, graph->nodes); // Verify the result if 
(reference_check_label != NULL) { printf("Label Validity: "); int error_num = CompareResults(h_labels, reference_check_label, graph->nodes, true); if (error_num > 0) printf("%d errors occurred.\n", error_num); printf("\nFirst 40 labels of the reference CPU result.\n"); DisplaySolution(reference_check_label, graph->nodes); } if (MARK_PREDECESSORS) { printf("\nFirst 40 preds of the GPU result.\n"); DisplaySolution(h_preds, graph->nodes); if (reference_check_label != NULL) { printf("\nFirst 40 preds of the reference CPU result (could be different because the paths are not unique).\n"); DisplaySolution(reference_check_pred, graph->nodes); } } DisplayStats( *stats, src, h_labels, *graph, elapsed, search_depth, total_queued, avg_duty); printf("\n\tMemory Usage(B)\t"); for (int gpu=0;gpu<num_gpus;gpu++) if (num_gpus>1) {if (gpu!=0) printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1",gpu,gpu,gpu,gpu); else printf(" #keys%d,0\t #keys%d,1", gpu, gpu);} else printf(" #keys%d,0\t #keys%d,1", gpu, gpu); if (num_gpus>1) printf(" #keys%d",num_gpus); printf("\n"); double max_queue_sizing_[2] = {0,0}, max_in_sizing_=0; for (int gpu=0;gpu<num_gpus;gpu++) { size_t gpu_free,dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&gpu_free,&dummy); printf("GPU_%d\t %ld",gpu_idx[gpu],org_size[gpu]-gpu_free); for (int i=0;i<num_gpus;i++) { for (int j=0; j<2; j++) { SizeT x=problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0*x/(num_gpus>1?problem->graph_slices[gpu]->in_counter[i]:problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) max_queue_sizing_[j]=factor; } if (num_gpus>1 && i!=0 ) for (int t=0;t<2;t++) { SizeT x=problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0*x/problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) max_in_sizing_=factor; } } if (num_gpus>1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus>1) printf("\t in_sizing =\t %lf", max_in_sizing_); printf("\n"); // Cleanup if (org_size ) {delete[] org_size ; org_size = NULL;} if (stats ) {delete stats ; stats = NULL;} if (enactor ) {delete enactor ; enactor = NULL;} if (problem ) {delete problem ; problem = NULL;} if (reference_labels) {delete[] reference_labels; reference_labels = NULL;} if (h_labels ) {delete[] h_labels ; h_labels = NULL;} if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;} if (h_preds ) {delete[] h_preds ; h_preds = NULL;} } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * @tparam SIZE_CHECK * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK> void RunTests_mark_predecessors(Test_Parameter *parameter) { if (parameter->mark_predecessors) RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK, true > (parameter); else RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG> void 
RunTests_size_check(Test_Parameter *parameter) { if (parameter->size_check) RunTests_mark_predecessors <VertexId, Value, SizeT, INSTRUMENT, DEBUG, true > (parameter); else RunTests_mark_predecessors <VertexId, Value, SizeT, INSTRUMENT, DEBUG, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT> void RunTests_debug(Test_Parameter *parameter) { if (parameter->debug) RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, true > (parameter); else RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT> void RunTests_instrumented(Test_Parameter *parameter) { if (parameter->instrumented) RunTests_debug <VertexId, Value, SizeT, true > (parameter); else RunTests_debug <VertexId, Value, SizeT, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Pointer to the CSR graph we process on * @param[in] args Reference to the command line arguments * @param[in] num_gpus Number of GPUs to run algorithm * @param[in] context CudaContext pointer for ModernGPU APIs * @param[in] gpu_idx GPU(s) used to run algorithm * @param[in] streams CUDA streams */ template < typename VertexId, typename Value, typename SizeT> void RunTests( Csr<VertexId, Value, SizeT> *graph, CommandLineArgs &args, int num_gpus, ContextPtr *context, int *gpu_idx, hipStream_t *streams) { string src_str = ""; Test_Parameter *parameter = new Test_Parameter; parameter -> Init(args); parameter -> graph = graph; parameter -> num_gpus = num_gpus; parameter -> context = context; parameter -> gpu_idx = gpu_idx; parameter -> streams = streams; // source vertex to start args.GetCmdLineArgument("src", src_str); if (src_str.empty()) { parameter->src = 0; } else if (src_str.compare("randomize") == 0) { parameter->src = graphio::RandomNode(graph->nodes); } else if (src_str.compare("largestdegree") == 0) { int max_degree; parameter->src = graph->GetNodeWithHighestDegree(max_degree); } else { args.GetCmdLineArgument("src", parameter->src); } // traversal mode args.GetCmdLineArgument("traversal-mode", parameter->traversal_mode); if (parameter->traversal_mode == -1) { parameter->traversal_mode = 0; } printf("src = %lld\n", parameter->src); RunTests_instrumented<VertexId, Value, SizeT>(parameter); } /****************************************************************************** * Main ******************************************************************************/ int main( int argc, char** argv) { CommandLineArgs args(argc, argv); int num_gpus = 0; int *gpu_idx = NULL; ContextPtr *context = NULL; hipStream_t *streams = NULL; bool g_undirected = false; if ((argc < 2) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } if (args.CheckCmdLineFlag ("device")) { std::vector<int> gpus; args.GetCmdLineArguments<int>("device",gpus); num_gpus = gpus.size(); gpu_idx = new int[num_gpus]; for (int i=0;i<num_gpus;i++) gpu_idx[i] = gpus[i]; } else { num_gpus = 1; gpu_idx = new int[num_gpus]; gpu_idx[0] = 0; } streams = new hipStream_t[num_gpus * num_gpus *2]; context = new ContextPtr [num_gpus * num_gpus]; printf("Using %d gpus: ", num_gpus); for (int 
gpu=0;gpu<num_gpus;gpu++) { printf(" %d ", gpu_idx[gpu]); util::SetDevice(gpu_idx[gpu]); for (int i=0;i<num_gpus*2;i++) { int _i=gpu*num_gpus*2+i; util::GRError(hipStreamCreate(&streams[_i]), "hipStreamCreate fialed.",__FILE__,__LINE__); if (i<num_gpus) context[gpu*num_gpus+i] = mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu],streams[_i]); } } printf("\n"); fflush(stdout); // Parse graph-contruction params g_undirected = args.CheckCmdLineFlag("undirected"); std::string graph_type = argv[1]; int flags = args.ParsedArgc(); int graph_args = argc - flags - 1; if (graph_args < 1) { Usage(); return 1; } // // Construct graph and perform search(es) // typedef int VertexId; // Use as the node identifier type typedef int Value; // Use as the value type typedef int SizeT; // Use as the graph size type Csr<VertexId, Value, SizeT> csr(false); // default value for stream_from_host is false if (graph_args < 1) { Usage(); return 1; } if (graph_type == "market") { // Matrix-market coordinate-formatted graph file char *market_filename = (graph_args == 2) ? argv[2] : NULL; if (graphio::BuildMarketGraph<true>( market_filename, csr, g_undirected, false) != 0) // no inverse graph { return 1; } } else if (graph_type == "rmat") { // parse rmat parameters SizeT rmat_nodes = 1 << 10; SizeT rmat_edges = 1 << 10; SizeT rmat_scale = 10; SizeT rmat_edgefactor = 48; double rmat_a = 0.57; double rmat_b = 0.19; double rmat_c = 0.19; double rmat_d = 1-(rmat_a+rmat_b+rmat_c); double rmat_vmultipiler = 20; double rmat_vmin = 1; int rmat_seed = -1; args.GetCmdLineArgument("rmat_scale", rmat_scale); rmat_nodes = 1 << rmat_scale; args.GetCmdLineArgument("rmat_nodes", rmat_nodes); args.GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor); rmat_edges = rmat_nodes * rmat_edgefactor; rmat_vmultipiler = rmat_edgefactor * 2; args.GetCmdLineArgument("rmat_edges", rmat_edges); args.GetCmdLineArgument("rmat_a", rmat_a); args.GetCmdLineArgument("rmat_b", rmat_b); args.GetCmdLineArgument("rmat_c", rmat_c); rmat_d = 1-(rmat_a+rmat_b+rmat_c); args.GetCmdLineArgument("rmat_d", rmat_d); args.GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler); args.GetCmdLineArgument("rmat_vmin", rmat_vmin); args.GetCmdLineArgument("rmat_seed", rmat_seed); CpuTimer cpu_timer; cpu_timer.Start(); if (graphio::BuildRmatGraph<true>( rmat_nodes, rmat_edges, csr, g_undirected, rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed) != 0) { return 1; } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); printf("graph generated: %.3f ms, a = %.3f, b = %.3f, c = %.3f, d = %.3f\n", elapsed, rmat_a, rmat_b, rmat_c, rmat_d); } else if (graph_type == "rgg") { SizeT rgg_nodes = 1 << 10; SizeT rgg_scale = 10; double rgg_thfactor = 0.55; double rgg_threshold = rgg_thfactor * sqrt(log(rgg_nodes) / rgg_nodes); double rgg_vmultipiler = 20; double rgg_vmin = 1; int rgg_seed = -1; args.GetCmdLineArgument("rgg_scale", rgg_scale); rgg_nodes = 1 << rgg_scale; args.GetCmdLineArgument("rgg_nodes", rgg_nodes); args.GetCmdLineArgument("rgg_thfactor", rgg_thfactor); rgg_threshold = rgg_thfactor * sqrt(log(rgg_nodes) / rgg_nodes); args.GetCmdLineArgument("rgg_threshold", rgg_threshold); args.GetCmdLineArgument("rgg_vmultipiler", rgg_vmultipiler); args.GetCmdLineArgument("rgg_vmin", rgg_vmin); args.GetCmdLineArgument("rgg_seed", rgg_seed); CpuTimer cpu_timer; cpu_timer.Start(); if (graphio::BuildRggGraph<true>( rgg_nodes, csr, rgg_threshold, g_undirected, rgg_vmultipiler, rgg_vmin, rgg_seed) !=0) { return 1; } cpu_timer.Stop(); float elapsed = 
cpu_timer.ElapsedMillis(); printf("graph generated: %.3f ms, threshold = %.3lf, vmultipiler = %.3lf\n", elapsed, rgg_threshold, rgg_vmultipiler); } else { // Unknown graph type fprintf(stderr, "Unspecified graph type\n"); return 1; } csr.PrintHistogram(); csr.DisplayGraph(true); //print graph with edge_value //util::cpu_mt::PrintCPUArray("row_offsets", csr.row_offsets,csr.nodes+1); //util::cpu_mt::PrintCPUArray("colum_indiece", csr.column_indices, csr.edges); csr.GetAverageEdgeValue(); csr.GetAverageDegree(); int max_degree; csr.GetNodeWithHighestDegree(max_degree); printf("max degree:%d\n", max_degree); // Run tests RunTests(&csr, args, num_gpus, context, gpu_idx, streams); return 0; }
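DisplayStats in the driver above reports the traversal rate as edges_visited / (elapsed * 1000.0) with elapsed in milliseconds, which works out to decimal millions of traversed edges per second (despite the MiEdges/s label). A small worked check of that arithmetic, with the helper name invented for illustration:

#include <cstdio>

// edges / (elapsed_ms * 1000) == edges / (elapsed_s * 1e6)  ->  millions of edges per second.
static double mteps(long long edges_visited, double elapsed_ms) {
    return (double)edges_visited / (elapsed_ms * 1000.0);
}

int main() {
    // 50,000,000 edges traversed in 25 ms  ->  2000 MEdges/s.
    std::printf("%.1f MEdges/s\n", mteps(50000000LL, 25.0));
    return 0;
}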
cc108c6051b774013946e0535deac7f1c47691b6.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_sssp.cu * * @brief Simple test driver program for single source shorest path. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <iostream> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> #include <gunrock/graphio/rmat.cuh> #include <gunrock/graphio/rgg.cuh> // SSSP includes #include <gunrock/app/sssp/sssp_enactor.cuh> #include <gunrock/app/sssp/sssp_problem.cuh> #include <gunrock/app/sssp/sssp_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <gunrock/priority_queue/kernel.cuh> #include <moderngpu.cuh> // Boost includes for CPU dijkstra SSSP reference algorithms #include <boost/config.hpp> #include <boost/graph/graph_traits.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/dijkstra_shortest_paths.hpp> #include <boost/property_map/property_map.hpp> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::sssp; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( " test_sssp <graph type> <graph type args> [--device=<device_index>]\n" " [--undirected] [--instrumented] [--src=<source index>] [--quick=<0|1>]\n" " [--mark-pred] [--queue-sizing=<scale factor>] [--traversal-mode=<0|1>]\n" " [--in-sizing=<in/out queue scale factor>] [--disable-size-check]\n" " [--grid-size=<grid size>] [partition_method=<random|biasrandom|clustered|metis>]\n" " [--v] [--iteration-num=<num>]\n" "\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n" " edges from stdin (or from the optionally-specified file).\n" " --device=<device_index> Set GPU device for running the test. [Default: 0].\n" " --undirected Treat the graph as undirected (symmetric).\n" " --instrumented Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty\n" " (a relative indicator of load imbalance.)\n" " --src=<source vertex id> Begins SSSP from the source [Default: 0].\n" " If randomize: from a random source vertex.\n" " If largestdegree: from largest degree vertex.\n" " --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n" " --mark-pred Keep both label info and predecessor info.\n" " --queue-sizing=<factor> Allocates a frontier queue sized at:\n" " (graph-edges * <scale factor>) [Default: 1.0].\n" " --v Print verbose per iteration debug info.\n" " --iteration-num=<number> Number of runs to perform the test [Default: 1].\n" " --traversal-mode=<0 or 1> Set traversal strategy, 0 for Load-Balanced,\n" " 1 for Dynamic-Cooperative [Default: dynamic\n" " determine based on average degree].\n" ); } /** * @brief Displays the SSSP result (i.e., distance from source) * * @tparam VertexId * @tparam SizeT * * @param[in] source_path Search depth from the source for each node. 
* @param[in] num_nodes Number of nodes in the graph. */ template<typename VertexId, typename SizeT> void DisplaySolution (VertexId *source_path, SizeT num_nodes) { if (num_nodes > 40) num_nodes = 40; printf("["); for (VertexId i = 0; i < num_nodes; ++i) { PrintValue(i); printf(":"); PrintValue(source_path[i]); printf(" "); } printf("]\n"); } /** * Performance/Evaluation statistics */ struct Stats { const char *name; Statistic rate; Statistic search_depth; Statistic redundant_work; Statistic duty; Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {} Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {} }; /** * @brief Test_Parameter structure */ struct Test_Parameter : gunrock::app::TestParameter_Base { public: //bool mark_predecessors ;// Mark src-distance vs. parent vertices int delta_factor; double max_queue_sizing1; Test_Parameter() { delta_factor = 16; mark_predecessors = false; max_queue_sizing1 = -1.0; } ~Test_Parameter() { } void Init(CommandLineArgs &args) { TestParameter_Base::Init(args); mark_predecessors = args.CheckCmdLineFlag("mark-pred"); args.GetCmdLineArgument("delta-factor" , delta_factor ); args.GetCmdLineArgument("queue-sizing1", max_queue_sizing1); } }; /** * @brief Displays timing and correctness statistics * * @tparam MARK_PREDECESSORS * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] stats Reference to the Stats object defined in RunTests * @param[in] src Source node where SSSP starts * @param[in] h_labels Host-side vector stores computed labels for validation * @param[in] graph Reference to the CSR graph we process on * @param[in] elapsed Total elapsed kernel running time * @param[in] search_depth Maximum search depth of the SSSP algorithm * @param[in] total_queued Total element queued in SSSP kernel running process * @param[in] avg_duty Average duty of the SSSP kernels */ template< typename VertexId, typename Value, typename SizeT> void DisplayStats( Stats &stats, VertexId src, Value *h_labels, const Csr<VertexId, Value, SizeT> &graph, double elapsed, VertexId search_depth, long long total_queued, double avg_duty) { // Compute nodes and edges visited SizeT edges_visited = 0; SizeT nodes_visited = 0; for (VertexId i = 0; i < graph.nodes; ++i) { if (h_labels[i] < util::MaxValue<VertexId>()) { ++nodes_visited; edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i]; } } double redundant_work = 0.0; if (total_queued > 0) { redundant_work = ((double) total_queued - edges_visited) / edges_visited; } redundant_work *= 100; // Display test name printf("[%s] finished.", stats.name); // Display statistics if (nodes_visited < 5) { printf("Fewer than 5 vertices visited.\n"); } else { // Display the specific sample statistics double m_teps = (double) edges_visited / (elapsed * 1000.0); printf("\n elapsed: %.4f ms, rate: %.4f MiEdges/s", elapsed, m_teps); if (search_depth != 0) printf(", search_depth: %lld", (long long) search_depth); printf("\n src: %lld, nodes_visited: %lld, edges_visited: %lld", (long long) src, (long long) nodes_visited, (long long) edges_visited); if (avg_duty != 0) { printf("\n avg CTA duty: %.2f%%", avg_duty * 100); } if (total_queued > 0) { printf(", total queued: %lld", total_queued); } if (redundant_work > 0) { printf(", redundant work: %.2f%%", redundant_work); } printf("\n"); } } /****************************************************************************** * SSSP Testing Routines *****************************************************************************/ /** * @brief 
A simple CPU-based reference SSSP ranking implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_values Host-side vector to store CPU computed labels for each node * @param[in] node_preds Host-side vector to store CPU computed predecessors for each node * @param[in] src Source node where SSSP starts */ template< typename VertexId, typename Value, typename SizeT, bool MARK_PREDECESSORS> void SimpleReferenceSssp( const Csr<VertexId, Value, SizeT> &graph, Value *node_values, VertexId *node_preds, VertexId src) { using namespace boost; // Prepare Boost Datatype and Data structure typedef adjacency_list<vecS, vecS, directedS, no_property, property <edge_weight_t, unsigned int> > Graph; typedef graph_traits<Graph>::vertex_descriptor vertex_descriptor; typedef graph_traits<Graph>::edge_descriptor edge_descriptor; typedef std::pair<VertexId, VertexId> Edge; Edge *edges = ( Edge*)malloc(sizeof( Edge)*graph.edges); Value *weight = (Value*)malloc(sizeof(Value)*graph.edges); for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j) { edges[j] = Edge(i, graph.column_indices[j]); weight[j] = graph.edge_values[j]; } } Graph g(edges, edges + graph.edges, weight, graph.nodes); std::vector<Value> d(graph.nodes); std::vector<vertex_descriptor> p(graph.nodes); vertex_descriptor s = vertex(src, g); property_map<Graph, vertex_index_t>::type indexmap = get(vertex_index, g); // // Perform SSSP // CpuTimer cpu_timer; cpu_timer.Start(); if (MARK_PREDECESSORS) { dijkstra_shortest_paths(g, s, predecessor_map(boost::make_iterator_property_map( p.begin(), get(boost::vertex_index, g))).distance_map( boost::make_iterator_property_map( d.begin(), get(boost::vertex_index, g)))); } else { dijkstra_shortest_paths(g, s, distance_map(boost::make_iterator_property_map( d.begin(), get(boost::vertex_index, g)))); } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); printf("CPU SSSP finished in %lf msec.\n", elapsed); Coo<Value, Value>* sort_dist = NULL; Coo<VertexId, VertexId>* sort_pred = NULL; sort_dist = (Coo<Value, Value>*)malloc( sizeof(Coo<Value, Value>) * graph.nodes); if (MARK_PREDECESSORS) { sort_pred = (Coo<VertexId, VertexId>*)malloc( sizeof(Coo<VertexId, VertexId>) * graph.nodes); } graph_traits < Graph >::vertex_iterator vi, vend; for (tie(vi, vend) = vertices(g); vi != vend; ++vi) { sort_dist[(*vi)].row = (*vi); sort_dist[(*vi)].col = d[(*vi)]; } std::stable_sort( sort_dist, sort_dist + graph.nodes, RowFirstTupleCompare<Coo<Value, Value> >); if (MARK_PREDECESSORS) { for (tie(vi, vend) = vertices(g); vi != vend; ++vi) { sort_pred[(*vi)].row = (*vi); sort_pred[(*vi)].col = p[(*vi)]; } std::stable_sort( sort_pred, sort_pred + graph.nodes, RowFirstTupleCompare< Coo<VertexId, VertexId> >); } for (int i = 0; i < graph.nodes; ++i) { node_values[i] = sort_dist[i].col; } if (MARK_PREDECESSORS) { for (int i = 0; i < graph.nodes; ++i) { node_preds[i] = sort_pred[i].col; } } if (sort_dist) free(sort_dist); if (sort_pred) free(sort_pred); } /** * @brief Run SSSP tests * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam MARK_PREDECESSORS * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK, bool MARK_PREDECESSORS> void RunTests(Test_Parameter *parameter) { typedef SSSPProblem< VertexId, SizeT, Value, 
MARK_PREDECESSORS> Problem; typedef SSSPEnactor< Problem, INSTRUMENT, DEBUG, SIZE_CHECK> Enactor; Csr<VertexId, Value, SizeT> *graph = (Csr<VertexId, Value, SizeT>*)parameter->graph; VertexId src = (VertexId)parameter -> src; int max_grid_size = parameter -> max_grid_size; int num_gpus = parameter -> num_gpus; double max_queue_sizing = parameter -> max_queue_sizing; double max_in_sizing = parameter -> max_in_sizing; ContextPtr *context = (ContextPtr*)parameter -> context; std::string partition_method = parameter -> partition_method; int *gpu_idx = parameter -> gpu_idx; cudaStream_t *streams = parameter -> streams; float partition_factor = parameter -> partition_factor; int partition_seed = parameter -> partition_seed; bool g_quick = parameter -> g_quick; bool g_stream_from_host = parameter -> g_stream_from_host; int delta_factor = parameter -> delta_factor; int iterations = parameter -> iterations; int traversal_mode = parameter -> traversal_mode; size_t *org_size = new size_t[num_gpus]; // Allocate host-side label array (for both reference and gpu-computed results) Value *reference_labels = new Value[graph->nodes]; Value *h_labels = new Value[graph->nodes]; Value *reference_check_label = (g_quick) ? NULL : reference_labels; VertexId *reference_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL; VertexId *h_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL; VertexId *reference_check_pred = (g_quick || !MARK_PREDECESSORS) ? NULL : reference_preds; for (int gpu=0;gpu<num_gpus;gpu++) { size_t dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&(org_size[gpu]),&dummy); } // Allocate SSSP enactor map Enactor* enactor = new Enactor(num_gpus, gpu_idx); // Allocate problem on GPU Problem *problem = new Problem; util::GRError(problem->Init( g_stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, delta_factor, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "Problem SSSP Initialization Failed", __FILE__, __LINE__); util::GRError(enactor->Init (context, problem, max_grid_size, traversal_mode), "SSSP Enactor init failed", __FILE__, __LINE__); // // Compute reference CPU SSSP solution for source-distance // if (reference_check_label != NULL) { printf("Computing reference value ...\n"); SimpleReferenceSssp<VertexId, Value, SizeT, MARK_PREDECESSORS>( *graph, reference_check_label, reference_check_pred, src); printf("\n"); } Stats *stats = new Stats("GPU SSSP"); long long total_queued = 0; VertexId search_depth = 0; double avg_duty = 0.0; float elapsed = 0.0f; // Perform SSSP CpuTimer cpu_timer; for (int iter = 0; iter < iterations; ++iter) { util::GRError(problem->Reset(src, enactor->GetFrontierType(), max_queue_sizing), "SSSP Problem Data Reset Failed", __FILE__, __LINE__); util::GRError(enactor->Reset(), "SSSP Enactor Reset failed", __FILE__, __LINE__); printf("__________________________\n");fflush(stdout); cpu_timer.Start(); util::GRError(enactor->Enact(src, traversal_mode), "SSSP Problem Enact Failed", __FILE__, __LINE__); cpu_timer.Stop(); printf("--------------------------\n");fflush(stdout); elapsed += cpu_timer.ElapsedMillis(); } elapsed /= iterations; enactor->GetStatistics(total_queued, search_depth, avg_duty); // Copy out results util::GRError(problem->Extract(h_labels, h_preds), "SSSP Problem Data Extraction Failed", __FILE__, __LINE__); for (SizeT i=0; i<graph->nodes;i++) if (reference_check_label[i]==-1) reference_check_label[i]=util::MaxValue<Value>(); // Display Solution printf("\nFirst 40 labels of the GPU result.\n"); 
DisplaySolution(h_labels, graph->nodes); // Verify the result if (reference_check_label != NULL) { printf("Label Validity: "); int error_num = CompareResults(h_labels, reference_check_label, graph->nodes, true); if (error_num > 0) printf("%d errors occurred.\n", error_num); printf("\nFirst 40 labels of the reference CPU result.\n"); DisplaySolution(reference_check_label, graph->nodes); } if (MARK_PREDECESSORS) { printf("\nFirst 40 preds of the GPU result.\n"); DisplaySolution(h_preds, graph->nodes); if (reference_check_label != NULL) { printf("\nFirst 40 preds of the reference CPU result (could be different because the paths are not unique).\n"); DisplaySolution(reference_check_pred, graph->nodes); } } DisplayStats( *stats, src, h_labels, *graph, elapsed, search_depth, total_queued, avg_duty); printf("\n\tMemory Usage(B)\t"); for (int gpu=0;gpu<num_gpus;gpu++) if (num_gpus>1) {if (gpu!=0) printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1",gpu,gpu,gpu,gpu); else printf(" #keys%d,0\t #keys%d,1", gpu, gpu);} else printf(" #keys%d,0\t #keys%d,1", gpu, gpu); if (num_gpus>1) printf(" #keys%d",num_gpus); printf("\n"); double max_queue_sizing_[2] = {0,0}, max_in_sizing_=0; for (int gpu=0;gpu<num_gpus;gpu++) { size_t gpu_free,dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&gpu_free,&dummy); printf("GPU_%d\t %ld",gpu_idx[gpu],org_size[gpu]-gpu_free); for (int i=0;i<num_gpus;i++) { for (int j=0; j<2; j++) { SizeT x=problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0*x/(num_gpus>1?problem->graph_slices[gpu]->in_counter[i]:problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) max_queue_sizing_[j]=factor; } if (num_gpus>1 && i!=0 ) for (int t=0;t<2;t++) { SizeT x=problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0*x/problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) max_in_sizing_=factor; } } if (num_gpus>1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus>1) printf("\t in_sizing =\t %lf", max_in_sizing_); printf("\n"); // Cleanup if (org_size ) {delete[] org_size ; org_size = NULL;} if (stats ) {delete stats ; stats = NULL;} if (enactor ) {delete enactor ; enactor = NULL;} if (problem ) {delete problem ; problem = NULL;} if (reference_labels) {delete[] reference_labels; reference_labels = NULL;} if (h_labels ) {delete[] h_labels ; h_labels = NULL;} if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;} if (h_preds ) {delete[] h_preds ; h_preds = NULL;} } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * @tparam SIZE_CHECK * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK> void RunTests_mark_predecessors(Test_Parameter *parameter) { if (parameter->mark_predecessors) RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK, true > (parameter); else RunTests <VertexId, Value, SizeT, INSTRUMENT, DEBUG, SIZE_CHECK, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, 
typename SizeT, bool INSTRUMENT, bool DEBUG> void RunTests_size_check(Test_Parameter *parameter) { if (parameter->size_check) RunTests_mark_predecessors <VertexId, Value, SizeT, INSTRUMENT, DEBUG, true > (parameter); else RunTests_mark_predecessors <VertexId, Value, SizeT, INSTRUMENT, DEBUG, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT> void RunTests_debug(Test_Parameter *parameter) { if (parameter->debug) RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, true > (parameter); else RunTests_size_check <VertexId, Value, SizeT, INSTRUMENT, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT> void RunTests_instrumented(Test_Parameter *parameter) { if (parameter->instrumented) RunTests_debug <VertexId, Value, SizeT, true > (parameter); else RunTests_debug <VertexId, Value, SizeT, false> (parameter); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Pointer to the CSR graph we process on * @param[in] args Reference to the command line arguments * @param[in] num_gpus Number of GPUs to run algorithm * @param[in] context CudaContext pointer for ModernGPU APIs * @param[in] gpu_idx GPU(s) used to run algorithm * @param[in] streams CUDA streams */ template < typename VertexId, typename Value, typename SizeT> void RunTests( Csr<VertexId, Value, SizeT> *graph, CommandLineArgs &args, int num_gpus, ContextPtr *context, int *gpu_idx, cudaStream_t *streams) { string src_str = ""; Test_Parameter *parameter = new Test_Parameter; parameter -> Init(args); parameter -> graph = graph; parameter -> num_gpus = num_gpus; parameter -> context = context; parameter -> gpu_idx = gpu_idx; parameter -> streams = streams; // source vertex to start args.GetCmdLineArgument("src", src_str); if (src_str.empty()) { parameter->src = 0; } else if (src_str.compare("randomize") == 0) { parameter->src = graphio::RandomNode(graph->nodes); } else if (src_str.compare("largestdegree") == 0) { int max_degree; parameter->src = graph->GetNodeWithHighestDegree(max_degree); } else { args.GetCmdLineArgument("src", parameter->src); } // traversal mode args.GetCmdLineArgument("traversal-mode", parameter->traversal_mode); if (parameter->traversal_mode == -1) { parameter->traversal_mode = 0; } printf("src = %lld\n", parameter->src); RunTests_instrumented<VertexId, Value, SizeT>(parameter); } /****************************************************************************** * Main ******************************************************************************/ int main( int argc, char** argv) { CommandLineArgs args(argc, argv); int num_gpus = 0; int *gpu_idx = NULL; ContextPtr *context = NULL; cudaStream_t *streams = NULL; bool g_undirected = false; if ((argc < 2) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } if (args.CheckCmdLineFlag ("device")) { std::vector<int> gpus; args.GetCmdLineArguments<int>("device",gpus); num_gpus = gpus.size(); gpu_idx = new int[num_gpus]; for (int i=0;i<num_gpus;i++) gpu_idx[i] = gpus[i]; } else { num_gpus = 1; gpu_idx = new int[num_gpus]; gpu_idx[0] = 0; } streams = new cudaStream_t[num_gpus * num_gpus *2]; context = new ContextPtr [num_gpus * num_gpus]; 
printf("Using %d gpus: ", num_gpus); for (int gpu=0;gpu<num_gpus;gpu++) { printf(" %d ", gpu_idx[gpu]); util::SetDevice(gpu_idx[gpu]); for (int i=0;i<num_gpus*2;i++) { int _i=gpu*num_gpus*2+i; util::GRError(cudaStreamCreate(&streams[_i]), "cudaStreamCreate fialed.",__FILE__,__LINE__); if (i<num_gpus) context[gpu*num_gpus+i] = mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu],streams[_i]); } } printf("\n"); fflush(stdout); // Parse graph-contruction params g_undirected = args.CheckCmdLineFlag("undirected"); std::string graph_type = argv[1]; int flags = args.ParsedArgc(); int graph_args = argc - flags - 1; if (graph_args < 1) { Usage(); return 1; } // // Construct graph and perform search(es) // typedef int VertexId; // Use as the node identifier type typedef int Value; // Use as the value type typedef int SizeT; // Use as the graph size type Csr<VertexId, Value, SizeT> csr(false); // default value for stream_from_host is false if (graph_args < 1) { Usage(); return 1; } if (graph_type == "market") { // Matrix-market coordinate-formatted graph file char *market_filename = (graph_args == 2) ? argv[2] : NULL; if (graphio::BuildMarketGraph<true>( market_filename, csr, g_undirected, false) != 0) // no inverse graph { return 1; } } else if (graph_type == "rmat") { // parse rmat parameters SizeT rmat_nodes = 1 << 10; SizeT rmat_edges = 1 << 10; SizeT rmat_scale = 10; SizeT rmat_edgefactor = 48; double rmat_a = 0.57; double rmat_b = 0.19; double rmat_c = 0.19; double rmat_d = 1-(rmat_a+rmat_b+rmat_c); double rmat_vmultipiler = 20; double rmat_vmin = 1; int rmat_seed = -1; args.GetCmdLineArgument("rmat_scale", rmat_scale); rmat_nodes = 1 << rmat_scale; args.GetCmdLineArgument("rmat_nodes", rmat_nodes); args.GetCmdLineArgument("rmat_edgefactor", rmat_edgefactor); rmat_edges = rmat_nodes * rmat_edgefactor; rmat_vmultipiler = rmat_edgefactor * 2; args.GetCmdLineArgument("rmat_edges", rmat_edges); args.GetCmdLineArgument("rmat_a", rmat_a); args.GetCmdLineArgument("rmat_b", rmat_b); args.GetCmdLineArgument("rmat_c", rmat_c); rmat_d = 1-(rmat_a+rmat_b+rmat_c); args.GetCmdLineArgument("rmat_d", rmat_d); args.GetCmdLineArgument("rmat_vmultipiler", rmat_vmultipiler); args.GetCmdLineArgument("rmat_vmin", rmat_vmin); args.GetCmdLineArgument("rmat_seed", rmat_seed); CpuTimer cpu_timer; cpu_timer.Start(); if (graphio::BuildRmatGraph<true>( rmat_nodes, rmat_edges, csr, g_undirected, rmat_a, rmat_b, rmat_c, rmat_d, rmat_vmultipiler, rmat_vmin, rmat_seed) != 0) { return 1; } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); printf("graph generated: %.3f ms, a = %.3f, b = %.3f, c = %.3f, d = %.3f\n", elapsed, rmat_a, rmat_b, rmat_c, rmat_d); } else if (graph_type == "rgg") { SizeT rgg_nodes = 1 << 10; SizeT rgg_scale = 10; double rgg_thfactor = 0.55; double rgg_threshold = rgg_thfactor * sqrt(log(rgg_nodes) / rgg_nodes); double rgg_vmultipiler = 20; double rgg_vmin = 1; int rgg_seed = -1; args.GetCmdLineArgument("rgg_scale", rgg_scale); rgg_nodes = 1 << rgg_scale; args.GetCmdLineArgument("rgg_nodes", rgg_nodes); args.GetCmdLineArgument("rgg_thfactor", rgg_thfactor); rgg_threshold = rgg_thfactor * sqrt(log(rgg_nodes) / rgg_nodes); args.GetCmdLineArgument("rgg_threshold", rgg_threshold); args.GetCmdLineArgument("rgg_vmultipiler", rgg_vmultipiler); args.GetCmdLineArgument("rgg_vmin", rgg_vmin); args.GetCmdLineArgument("rgg_seed", rgg_seed); CpuTimer cpu_timer; cpu_timer.Start(); if (graphio::BuildRggGraph<true>( rgg_nodes, csr, rgg_threshold, g_undirected, rgg_vmultipiler, rgg_vmin, rgg_seed) !=0) { return 1; } 
cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); printf("graph generated: %.3f ms, threshold = %.3lf, vmultipiler = %.3lf\n", elapsed, rgg_threshold, rgg_vmultipiler); } else { // Unknown graph type fprintf(stderr, "Unspecified graph type\n"); return 1; } csr.PrintHistogram(); csr.DisplayGraph(true); //print graph with edge_value //util::cpu_mt::PrintCPUArray("row_offsets", csr.row_offsets,csr.nodes+1); //util::cpu_mt::PrintCPUArray("colum_indiece", csr.column_indices, csr.edges); csr.GetAverageEdgeValue(); csr.GetAverageDegree(); int max_degree; csr.GetNodeWithHighestDegree(max_degree); printf("max degree:%d\n", max_degree); // Run tests RunTests(&csr, args, num_gpus, context, gpu_idx, streams); return 0; }
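The RunTests_* wrappers above convert each runtime flag (mark_predecessors, size_check, debug, instrumented) into a compile-time template parameter by peeling one boolean per layer before the fully specialized RunTests runs. A minimal sketch of that dispatch pattern, reduced to two flags and using hypothetical names (Params, Run, Run_debug, Run_mark_predecessors) that are not part of the file above:

struct Params { bool mark_predecessors; bool debug; };   // hypothetical stand-in for Test_Parameter

template <bool MARK_PREDECESSORS, bool DEBUG>
void Run(Params *p) { /* fully specialized body, no runtime branches on the flags */ }

template <bool MARK_PREDECESSORS>
void Run_debug(Params *p) {
    if (p->debug) Run<MARK_PREDECESSORS, true >(p);
    else          Run<MARK_PREDECESSORS, false>(p);
}

void Run_mark_predecessors(Params *p) {
    if (p->mark_predecessors) Run_debug<true >(p);
    else                      Run_debug<false>(p);
}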
acf8e88943cd8192ccb0528c74312d3f413062d6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void neg_double(int n, int idx, double *dy, int incy, double *result)
{
    // Grid-stride loop: negate elements at indices >= idx that are multiples of incy.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if (i >= idx && i % incy == 0)
            result[i] = -dy[i];
    }
}
acf8e88943cd8192ccb0528c74312d3f413062d6.cu
#include "includes.h" __global__ void neg_double(int n,int idx,double *dy,int incy,double *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = -dy[i]; } }
5e6ffe272eb9282b3595e0d46162dfe5e88083c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/hip/LaunchUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { #define MAX_THREADS 512 // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_out_frame( const scalar_t* idata, scalar_t* odata, const size_t nc, const size_t height1, const size_t width1, const size_t height2, const size_t width2, float height_scale, float width_scale) { size_t nc_iter = threadIdx.z + blockIdx.z * blockDim.z; int w2 = threadIdx.x + blockIdx.x * blockDim.x; int h2 = threadIdx.y + blockIdx.y * blockDim.y; if (w2 >= width2 || h2 >= height2) { return; } int nc_stride = blockDim.z * gridDim.z; const size_t h1 = height1 == height2 ? h2 : nearest_neighbor_compute_source_index(height_scale, h2, height1); const size_t w1 = width1 == width2 ? w2 : nearest_neighbor_compute_source_index(width_scale, w2, width1); size_t src_index = (nc_iter * height1 + h1) * width1 + w1; size_t src_index_stride = nc_stride * width1 * height1; size_t dst_index = (nc_iter * height2 + h2) * width2 + w2; size_t dst_index_stride = nc_stride * width2 * height2; // iterating over while (nc_iter < nc) { odata[dst_index] = idata[src_index]; dst_index += dst_index_stride; src_index += src_index_stride; nc_iter += nc_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_h, size_t src_dim_w, size_t dst_dim_h, size_t dst_dim_w, scalar_t* grad_i, float height_scale, float width_scale) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_h * dst_dim_w) return; int dst_c_stride = dst_dim_h * dst_dim_w; int src_c_stride = src_dim_h * src_dim_w; int c = (dst_idx / (dst_c_stride)) % dim_c; int dst_y = (dst_idx / dst_dim_w) % dst_dim_h; int src_y = nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h); int src_y_up = nearest_neighbor_bw_compute_source_index( height_scale, dst_y + 1, src_dim_h + 1); int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w); int src_x_up = nearest_neighbor_bw_compute_source_index( width_scale, dst_x + 1, src_dim_w + 1); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; for (int y = src_y; y < src_y_up; y++) { for (int x = src_x; x < src_x_up; x++) { int src_idx = b * dim_c * src_c_stride + c * src_c_stride + y * src_dim_w + x; grad += grad_o[src_idx]; } } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_c_stride; } } static void upsample_nearest2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU( "upsample_nearest2d_out_cuda_template", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); int output_height = output_size[0]; int output_width = 
output_size[1]; int nbatch = input_.size(0); int channels = input_.size(1); int input_height = input_.size(2); int input_width = input_.size(3); upsample_2d_shape_check( input_, Tensor(), nbatch, channels, input_height, input_width, output_height, output_width); AT_ASSERT( input_height > 0 && input_width > 0 && output_height > 0 && output_width > 0); Tensor input = input_.contiguous(); output.resize_({nbatch, channels, output_height, output_width}); if (input.numel() == 0) { return; } int nc = nbatch * channels; const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; // upsample_2d_shape_check makes sure input/output tensor is not empty; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(output_width), max_threads)); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(output_height), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(nc, max_threads / block_x / block_y)); const dim3 block(block_x, block_y, block_z); int grid_x = cuda::ATenCeilDiv(output_width, block_x); int grid_y = cuda::ATenCeilDiv(output_height, block_y); int grid_z = std::min<int>( maxGridSize[2], cuda::ATenCeilDiv(nc, block_z * 4)); const dim3 grid(grid_x, grid_y, grid_z); // Error out on cases where grid_x & grid_y exceeds limit of launch config, as // the current kernel implementation doesn't loop over the two dimensions. // This is unlikely to happen. // TODO: kernel implementation could stride on spatial dimension. We probably // need to overhaul the kernel. TORCH_CHECK( grid_x <= maxGridSize[0] && grid_y <= maxGridSize[1], "input tensor has spatial dimension larger than the kernel capacity"); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height); const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width); hipLaunchKernelGGL(( upsample_nearest2d_out_frame<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, stream, idata, odata, nc, input_height, input_width, output_height, output_width, height_scale, width_scale); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } static void upsample_nearest2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest2d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 4, "It is expected input_size equals to 4, but got size ", input_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; upsample_2d_shape_check( Tensor(), grad_output_, nbatch, channels, 
input_height, input_width, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_height, input_width}); if (grad_input.numel() == 0) { return; } // upsample_2d_shape_check makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height); const float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width); hipLaunchKernelGGL(( upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t>) , dim3(gdim), dim3(bdim), 0, stream, odata, nbatch, channels, output_height, output_width, input_height, input_width, idata, height_scale, width_scale); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_nearest2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor& output) { upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w); } TORCH_IMPL_FUNC(upsample_nearest2d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor& grad_input) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_nearest2d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, c10::optional<ArrayRef<double>> scale_factors) { auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); upsample_nearest2d_out_cuda_template(output, input, osize, scale_h, scale_w); return output; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors) { auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, osize, input_size, scale_h, scale_w); return grad_input; } } // namespace native } // namespace at
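upsample_nearest2d_out_cuda_template above sizes the launch by clamping a 3-D block against the device limits and the NC extent, then ceil-dividing the output into a grid whose z dimension is deliberately undersized (the kernel strides over NC). A simplified sketch of that sizing logic with hypothetical helper names; the real code also checks maxThreadsDim/maxGridSize per axis:

#include <algorithm>

static int last_pow2(int n) {                    // largest power of two <= n (illustrative)
    int p = 1;
    while (p * 2 <= n) p *= 2;
    return p;
}

static dim3 pick_block(int out_w, int out_h, int nc, int max_threads) {
    int bx = std::min(last_pow2(out_w), max_threads);
    int by = std::min(last_pow2(out_h), max_threads / bx);
    int bz = std::min(nc, max_threads / (bx * by));
    return dim3(bx, by, bz);
}

static dim3 pick_grid(int out_w, int out_h, int nc, dim3 block) {
    return dim3((out_w + block.x - 1) / block.x,
                (out_h + block.y - 1) / block.y,
                (nc + block.z * 4 - 1) / (block.z * 4));  // kernel loops over the remaining nc
}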
5e6ffe272eb9282b3595e0d46162dfe5e88083c6.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { #define MAX_THREADS 512 // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_out_frame( const scalar_t* idata, scalar_t* odata, const size_t nc, const size_t height1, const size_t width1, const size_t height2, const size_t width2, float height_scale, float width_scale) { size_t nc_iter = threadIdx.z + blockIdx.z * blockDim.z; int w2 = threadIdx.x + blockIdx.x * blockDim.x; int h2 = threadIdx.y + blockIdx.y * blockDim.y; if (w2 >= width2 || h2 >= height2) { return; } int nc_stride = blockDim.z * gridDim.z; const size_t h1 = height1 == height2 ? h2 : nearest_neighbor_compute_source_index(height_scale, h2, height1); const size_t w1 = width1 == width2 ? w2 : nearest_neighbor_compute_source_index(width_scale, w2, width1); size_t src_index = (nc_iter * height1 + h1) * width1 + w1; size_t src_index_stride = nc_stride * width1 * height1; size_t dst_index = (nc_iter * height2 + h2) * width2 + w2; size_t dst_index_stride = nc_stride * width2 * height2; // iterating over while (nc_iter < nc) { odata[dst_index] = idata[src_index]; dst_index += dst_index_stride; src_index += src_index_stride; nc_iter += nc_stride; } } // see NOTE [ Nearest neighbor upsampling kernel implementation ] template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest2d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_h, size_t src_dim_w, size_t dst_dim_h, size_t dst_dim_w, scalar_t* grad_i, float height_scale, float width_scale) { int dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_h * dst_dim_w) return; int dst_c_stride = dst_dim_h * dst_dim_w; int src_c_stride = src_dim_h * src_dim_w; int c = (dst_idx / (dst_c_stride)) % dim_c; int dst_y = (dst_idx / dst_dim_w) % dst_dim_h; int src_y = nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h); int src_y_up = nearest_neighbor_bw_compute_source_index( height_scale, dst_y + 1, src_dim_h + 1); int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w); int src_x_up = nearest_neighbor_bw_compute_source_index( width_scale, dst_x + 1, src_dim_w + 1); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; for (int y = src_y; y < src_y_up; y++) { for (int x = src_x; x < src_x_up; x++) { int src_idx = b * dim_c * src_c_stride + c * src_c_stride + y * src_dim_w + x; grad += grad_o[src_idx]; } } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_c_stride; } } static void upsample_nearest2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU( "upsample_nearest2d_out_cuda_template", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_.size(0); int channels = input_.size(1); int 
input_height = input_.size(2); int input_width = input_.size(3); upsample_2d_shape_check( input_, Tensor(), nbatch, channels, input_height, input_width, output_height, output_width); AT_ASSERT( input_height > 0 && input_width > 0 && output_height > 0 && output_width > 0); Tensor input = input_.contiguous(); output.resize_({nbatch, channels, output_height, output_width}); if (input.numel() == 0) { return; } int nc = nbatch * channels; const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize; // upsample_2d_shape_check makes sure input/output tensor is not empty; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(output_width), max_threads)); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(output_height), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(nc, max_threads / block_x / block_y)); const dim3 block(block_x, block_y, block_z); int grid_x = cuda::ATenCeilDiv(output_width, block_x); int grid_y = cuda::ATenCeilDiv(output_height, block_y); int grid_z = std::min<int>( maxGridSize[2], cuda::ATenCeilDiv(nc, block_z * 4)); const dim3 grid(grid_x, grid_y, grid_z); // Error out on cases where grid_x & grid_y exceeds limit of launch config, as // the current kernel implementation doesn't loop over the two dimensions. // This is unlikely to happen. // TODO: kernel implementation could stride on spatial dimension. We probably // need to overhaul the kernel. TORCH_CHECK( grid_x <= maxGridSize[0] && grid_y <= maxGridSize[1], "input tensor has spatial dimension larger than the kernel capacity"); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest2d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height); const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width); upsample_nearest2d_out_frame<scalar_t, accscalar_t> <<<grid, block, 0, stream>>>( idata, odata, nc, input_height, input_width, output_height, output_width, height_scale, width_scale); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } static void upsample_nearest2d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest2d_backward_out_cuda", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 2, "It is expected output_size equals to 2, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 4, "It is expected input_size equals to 4, but got size ", input_size.size()); int output_height = output_size[0]; int output_width = output_size[1]; int nbatch = input_size[0]; int channels = input_size[1]; int input_height = input_size[2]; int input_width = input_size[3]; upsample_2d_shape_check( Tensor(), grad_output_, nbatch, channels, input_height, input_width, output_height, output_width); Tensor grad_output = grad_output_.contiguous(); 
grad_input.resize_({nbatch, channels, input_height, input_width}); if (grad_input.numel() == 0) { return; } // upsample_2d_shape_check makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)}; // safe check for int32 indexing; implicitly restrict launch config for kernel TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); const float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height); const float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width); upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t> <<<gdim, bdim, 0, stream>>>( odata, nbatch, channels, output_height, output_width, input_height, input_width, idata, height_scale, width_scale); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_nearest2d_out_cuda) ( const Tensor& input, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor& output) { upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w); } TORCH_IMPL_FUNC(upsample_nearest2d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor& grad_input) { upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, scales_h, scales_w); } using at::native::upsample::compute_output_size; using at::native::upsample_cuda::get_scale_value; Tensor upsample_nearest2d_cuda( const Tensor& input, c10::optional<IntArrayRef> output_size, c10::optional<ArrayRef<double>> scale_factors) { auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto osize = compute_output_size(input.sizes(), output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); upsample_nearest2d_out_cuda_template(output, input, osize, scale_h, scale_w); return output; } Tensor upsample_nearest2d_backward_cuda( const Tensor& grad_output, c10::optional<IntArrayRef> output_size, IntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors) { auto osize = compute_output_size(input_size, output_size, scale_factors); auto scale_h = get_scale_value(scale_factors, 0); auto scale_w = get_scale_value(scale_factors, 1); auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); upsample_nearest2d_backward_out_cuda_template( grad_input, grad_output, osize, input_size, scale_h, scale_w); return grad_input; } } // namespace native } // namespace at
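The functional differences between this .cu file and its .hip counterpart above are the launch syntax and the runtime prefixes (cudaStream_t/getCurrentCUDAStream vs hipStream_t/getCurrentHIPStreamMasqueradingAsCUDA, C10_CUDA_KERNEL_LAUNCH_CHECK vs C10_HIP_KERNEL_LAUNCH_CHECK). A minimal illustration of the launch rewrite using a hypothetical kernel that is not taken from the files:

__global__ void scale(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

// CUDA form (the style used in the .cu file):
//   scale<<<grid, block, 0, stream>>>(d_x, 2.0f, n);
//
// hipify rewrites the launch to (the style used in the .hip file):
//   hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, stream, d_x, 2.0f, n);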
a12a769fddc0699b8f1165e19d8b35011072bcb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_jacobi_kernel; int xdim0_tea_leaf_jacobi_kernel_h = -1; int ydim0_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim1_tea_leaf_jacobi_kernel; int xdim1_tea_leaf_jacobi_kernel_h = -1; int ydim1_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim2_tea_leaf_jacobi_kernel; int xdim2_tea_leaf_jacobi_kernel_h = -1; int ydim2_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim3_tea_leaf_jacobi_kernel; int xdim3_tea_leaf_jacobi_kernel_h = -1; int ydim3_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim4_tea_leaf_jacobi_kernel; int xdim4_tea_leaf_jacobi_kernel_h = -1; int ydim4_tea_leaf_jacobi_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y) (x+xdim0_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC1(x,y) (x+xdim1_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC2(x,y) (x+xdim2_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC3(x,y) (x+xdim3_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC4(x,y) (x+xdim4_tea_leaf_jacobi_kernel*(y)) //user function __device__ void tea_leaf_jacobi_kernel_gpu(double *u1, const double *Kx, const double *Ky, const double *un,const double *u0,const double *rx,const double *ry, double *error) { u1[OPS_ACC0(0,0)] = (u0[OPS_ACC4(0,0)] + (*rx)*(Kx[OPS_ACC1(1, 0)] *un[OPS_ACC3(1, 0)] + Kx[OPS_ACC1(0,0)]*un[OPS_ACC3(-1, 0)]) + (*ry)*(Ky[OPS_ACC2(0, 1)] *un[OPS_ACC3(0, 1)] + Ky[OPS_ACC2(0,0)]*un[OPS_ACC3(0, -1)])) /(1.0 + (*rx)*(Kx[OPS_ACC1(1, 0)] + Kx[OPS_ACC1(0,0)]) + (*ry)*(Ky[OPS_ACC2(0, 1)] + Ky[OPS_ACC2(0,0)])); *error = *error + fabs(u1[OPS_ACC0(0,0)] - un[OPS_ACC3(0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_tea_leaf_jacobi_kernel( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, const double arg5, const double arg6, double* __restrict arg7, int size0, int size1 ){ double arg7_l[1]; for (int d=0; d<1; d++) arg7_l[d] = ZERO_double; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_jacobi_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_jacobi_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_jacobi_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_tea_leaf_jacobi_kernel; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_tea_leaf_jacobi_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_jacobi_kernel_gpu(arg0, arg1, arg2, arg3, arg4, &arg5, &arg6, arg7_l); } for (int d=0; d<1; d++) ops_reduction_cuda<OPS_INC>(&arg7[d+(blockIdx.x + blockIdx.y*gridDim.x)*1],arg7_l[d]); } // host stub function void ops_par_loop_tea_leaf_jacobi_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,8,range,42)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(42,"tea_leaf_jacobi_kernel"); OPS_kernels[42].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ 
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; if (xdim0 != xdim0_tea_leaf_jacobi_kernel_h || xdim1 != xdim1_tea_leaf_jacobi_kernel_h || xdim2 != xdim2_tea_leaf_jacobi_kernel_h || xdim3 != xdim3_tea_leaf_jacobi_kernel_h || xdim4 != xdim4_tea_leaf_jacobi_kernel_h) { hipMemcpyToSymbol( xdim0_tea_leaf_jacobi_kernel, &xdim0, sizeof(int) ); xdim0_tea_leaf_jacobi_kernel_h = xdim0; hipMemcpyToSymbol( xdim1_tea_leaf_jacobi_kernel, &xdim1, sizeof(int) ); xdim1_tea_leaf_jacobi_kernel_h = xdim1; hipMemcpyToSymbol( xdim2_tea_leaf_jacobi_kernel, &xdim2, sizeof(int) ); xdim2_tea_leaf_jacobi_kernel_h = xdim2; hipMemcpyToSymbol( xdim3_tea_leaf_jacobi_kernel, &xdim3, sizeof(int) ); xdim3_tea_leaf_jacobi_kernel_h = xdim3; hipMemcpyToSymbol( xdim4_tea_leaf_jacobi_kernel, &xdim4, sizeof(int) ); xdim4_tea_leaf_jacobi_kernel_h = xdim4; } #ifdef OPS_MPI double *arg7h = (double *)(((ops_reduction)args[7].data)->data + ((ops_reduction)args[7].data)->size * block->index); #else double *arg7h = (double *)(((ops_reduction)args[7].data)->data); #endif dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1); int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)*1); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg7.data = OPS_reduct_h + reduct_bytes; arg7.data_d = OPS_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg7.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; char *p_a[8]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * 
args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); p_a[4] = (char *)args[4].data_d + base4; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[42].mpi_time += t2-t1; } int nshared = 0; int nthread = OPS_block_size_x*OPS_block_size_y; nshared = MAX(nshared,sizeof(double)*1); nshared = MAX(nshared*nthread,reduct_size*nthread); //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_tea_leaf_jacobi_kernel), dim3(grid), dim3(tblock), nshared , 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], *(double *)arg5.data, *(double *)arg6.data, (double *)arg7.data_d,x_size, y_size); mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg7h[d] = arg7h[d] + ((double *)arg7.data)[d+b*1]; } } arg7.data = (char *)arg7h; if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[42].time += t1-t2; } ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[42].mpi_time += t2-t1; OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg4); } }
a12a769fddc0699b8f1165e19d8b35011072bcb9.cu
// // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_jacobi_kernel; int xdim0_tea_leaf_jacobi_kernel_h = -1; int ydim0_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim1_tea_leaf_jacobi_kernel; int xdim1_tea_leaf_jacobi_kernel_h = -1; int ydim1_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim2_tea_leaf_jacobi_kernel; int xdim2_tea_leaf_jacobi_kernel_h = -1; int ydim2_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim3_tea_leaf_jacobi_kernel; int xdim3_tea_leaf_jacobi_kernel_h = -1; int ydim3_tea_leaf_jacobi_kernel_h = -1; __constant__ int xdim4_tea_leaf_jacobi_kernel; int xdim4_tea_leaf_jacobi_kernel_h = -1; int ydim4_tea_leaf_jacobi_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y) (x+xdim0_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC1(x,y) (x+xdim1_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC2(x,y) (x+xdim2_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC3(x,y) (x+xdim3_tea_leaf_jacobi_kernel*(y)) #define OPS_ACC4(x,y) (x+xdim4_tea_leaf_jacobi_kernel*(y)) //user function __device__ void tea_leaf_jacobi_kernel_gpu(double *u1, const double *Kx, const double *Ky, const double *un,const double *u0,const double *rx,const double *ry, double *error) { u1[OPS_ACC0(0,0)] = (u0[OPS_ACC4(0,0)] + (*rx)*(Kx[OPS_ACC1(1, 0)] *un[OPS_ACC3(1, 0)] + Kx[OPS_ACC1(0,0)]*un[OPS_ACC3(-1, 0)]) + (*ry)*(Ky[OPS_ACC2(0, 1)] *un[OPS_ACC3(0, 1)] + Ky[OPS_ACC2(0,0)]*un[OPS_ACC3(0, -1)])) /(1.0 + (*rx)*(Kx[OPS_ACC1(1, 0)] + Kx[OPS_ACC1(0,0)]) + (*ry)*(Ky[OPS_ACC2(0, 1)] + Ky[OPS_ACC2(0,0)])); *error = *error + fabs(u1[OPS_ACC0(0,0)] - un[OPS_ACC3(0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_tea_leaf_jacobi_kernel( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, const double arg5, const double arg6, double* __restrict arg7, int size0, int size1 ){ double arg7_l[1]; for (int d=0; d<1; d++) arg7_l[d] = ZERO_double; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_jacobi_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_jacobi_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_jacobi_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_tea_leaf_jacobi_kernel; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_tea_leaf_jacobi_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_jacobi_kernel_gpu(arg0, arg1, arg2, arg3, arg4, &arg5, &arg6, arg7_l); } for (int d=0; d<1; d++) ops_reduction_cuda<OPS_INC>(&arg7[d+(blockIdx.x + blockIdx.y*gridDim.x)*1],arg7_l[d]); } // host stub function void ops_par_loop_tea_leaf_jacobi_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,8,range,42)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(42,"tea_leaf_jacobi_kernel"); OPS_kernels[42].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] 
>= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; if (xdim0 != xdim0_tea_leaf_jacobi_kernel_h || xdim1 != xdim1_tea_leaf_jacobi_kernel_h || xdim2 != xdim2_tea_leaf_jacobi_kernel_h || xdim3 != xdim3_tea_leaf_jacobi_kernel_h || xdim4 != xdim4_tea_leaf_jacobi_kernel_h) { cudaMemcpyToSymbol( xdim0_tea_leaf_jacobi_kernel, &xdim0, sizeof(int) ); xdim0_tea_leaf_jacobi_kernel_h = xdim0; cudaMemcpyToSymbol( xdim1_tea_leaf_jacobi_kernel, &xdim1, sizeof(int) ); xdim1_tea_leaf_jacobi_kernel_h = xdim1; cudaMemcpyToSymbol( xdim2_tea_leaf_jacobi_kernel, &xdim2, sizeof(int) ); xdim2_tea_leaf_jacobi_kernel_h = xdim2; cudaMemcpyToSymbol( xdim3_tea_leaf_jacobi_kernel, &xdim3, sizeof(int) ); xdim3_tea_leaf_jacobi_kernel_h = xdim3; cudaMemcpyToSymbol( xdim4_tea_leaf_jacobi_kernel, &xdim4, sizeof(int) ); xdim4_tea_leaf_jacobi_kernel_h = xdim4; } #ifdef OPS_MPI double *arg7h = (double *)(((ops_reduction)args[7].data)->data + ((ops_reduction)args[7].data)->size * block->index); #else double *arg7h = (double *)(((ops_reduction)args[7].data)->data); #endif dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1); int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)*1); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg7.data = OPS_reduct_h + reduct_bytes; arg7.data_d = OPS_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg7.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; char *p_a[8]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - 
d_m[1]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); p_a[4] = (char *)args[4].data_d + base4; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[42].mpi_time += t2-t1; } int nshared = 0; int nthread = OPS_block_size_x*OPS_block_size_y; nshared = MAX(nshared,sizeof(double)*1); nshared = MAX(nshared*nthread,reduct_size*nthread); //call kernel wrapper function, passing in pointers to data ops_tea_leaf_jacobi_kernel<<<grid, tblock, nshared >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], *(double *)arg5.data, *(double *)arg6.data, (double *)arg7.data_d,x_size, y_size); mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg7h[d] = arg7h[d] + ((double *)arg7.data)[d+b*1]; } } arg7.data = (char *)arg7h; if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[42].time += t1-t2; } ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[42].mpi_time += t2-t1; OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[42].transfer += ops_compute_transfer(dim, start, end, &arg4); } }
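The reduction in this pair works in two stages: each thread accumulates arg7_l locally, ops_reduction_cuda<OPS_INC> folds it into one slot per block, and after mvReductArraysToHost the host loop over maxblocks adds the per-block partials into arg7h. A minimal, self-contained sketch of the same two-stage scheme with a hypothetical kernel (a shared-memory tree reduction standing in for ops_reduction_cuda; blockDim.x assumed to be a power of two):

__global__ void partial_sums(const double *in, double *block_out, int n) {
    extern __shared__ double s[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    s[tid] = (i < n) ? in[i] : 0.0;                 // stage 1: per-thread value
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads();
    }
    if (tid == 0) block_out[blockIdx.x] = s[0];     // one partial per block
}

// Stage 2 on the host: copy block_out back and sum the nblocks partials serially,
// mirroring the loop over maxblocks that accumulates into arg7h above.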
25bd157b4ff6c936d6e2425ac1e98e3ca6271998.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <conio.h> #include <time.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <string> #include <chrono> #include <thread> #include <Windows.h> using namespace std; #define OBJETIVO 2048 #define DESP_POS 1 #define DESP_NEG -1 #define NO_DESP 0 #ifdef __INTELLISENSE__ void __syncthreads(); // Para evitar el error que da el intellisense con syncthreads y atomicadd void atomicAdd(int *a, int b); #endif // Variables globales para recoger por parmetros struct dimensionesMatriz { int numFilas; int numColumnas; } dimMatriz; dim3 dimGrid; // Grid de bloques dim3 dimBlock; // Hilos por bloque // Variables de control // Juego automtico o manual bool automatico; // N de bytes que ocupa la matriz int bytesMatriz; // Dificultad del juego bool modoDiablo; // Control de vidas: int vidas; // Funciones de juego __host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida); __host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida); // Funciones auxiliares en Device __device__ int getElemento(int *matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz); __device__ void setElemento(int *matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz); // Kernels // Kernel movimiento __global__ void kernelDesplazar(int *h_matrizEntrada, int *h_matrizSalida, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz); // Kernels auxiliares __global__ void kernelSuma(int *h_matrizEntrada, int *h_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz); __global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz); __global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz); // Funciones auxiliares de comprobacin de estado de la matriz __global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz); __global__ void kernelComprobarLlena(int *d_matrizUno, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz); __global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz); __global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz); // Funciones auxiliares en Host __host__ void caracteristicasTarjeta(); __host__ void leerParametros(int argc, const char* argv[]); // Operaciones con matrices __host__ void inicializarMatriz(int *h_matriz); __host__ void rellenarMatrizconcero(int *h_matriz); __host__ void pintarMatriz(int *h_matriz); __host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega); __host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento); __host__ void nuevaSemilla(int *h_matriz, int numSemillas); // Comprobadores __host__ bool estaLlena(int* d_matriz); __host__ bool finJuego(int* d_matriz); __host__ bool movimientosPosibles(int* d_matriz); // Funciones de host de carga y guardado de matrices: __host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos); __host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos); // Funcion de movimiento en Host __host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int 
*d_matrizEntrada, int *d_matrizSalida, int *h_puntuacion, int despVertical, int despHorizontal); // MAIN int main(int argc, const char* argv[]) { leerParametros(argc, argv); // Declaracion de matrices en host: int* h_matriz = (int *)malloc(bytesMatriz); int* h_matrizResultado = (int *)malloc(bytesMatriz); // Punteros a matrices en DEVICE: int *d_matrizEntrada; int *d_matrizSalida; // Reserva de memoria en DEVICE hipMalloc((void **)&d_matrizEntrada, bytesMatriz); hipMalloc((void **)&d_matrizSalida, bytesMatriz); // Relleno las matrices con 0s: rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); if (automatico) juegoAutomatico(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida); else juegoManual(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida); // Libero la memoria de device hipFree(d_matrizEntrada); hipFree(d_matrizSalida); return 0; } // ----------- MODOS DE JUEGO ----------- // __host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida) { cout << "+--------------------------------------------------------+" << endl; cout << "| Bienvenido al 16384, se ha elegido el modo automatico. |" << endl; cout << "+--------------------------------------------------------+" << endl; inicializarMatriz(h_matriz); // Se comprueban las caracteristicas de la tarjeta cout << "+--------------------------------------------------------+" << endl; caracteristicasTarjeta(); cout << "+--------------------------------------------------------+" << endl; cout << endl; system("pause"); system("cls"); // Contador de movimientos int movimientos = 0; int puntuacion = 0; vidas = 5; // Variable control de entrada bool seguirJugando = false; bool ganado = false; while (!ganado && vidas > 0) { // Eligo un movimiento aleatorio int movimiento = rand() % 4; system("CLS"); // Y lo hago switch (movimiento) { // PARAMETROS DESPLAZAR_MATRIZ -> matriz inicial, matriz resultado, desplazamiento eje y, desplazamiento eje x case 0: cout << "Muevo arriba " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP); // Desplazar arriba ganado = finJuego(d_matrizSalida); break; case 1: cout << "Muevo abajo " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP); // Desplazar abajo ganado = finJuego(d_matrizSalida); break; case 2: cout << "Muevo izquierda " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG); // Desplazar izquierda ganado = finJuego(d_matrizSalida); break; case 3: cout << "Muevo derecha " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS); // Desplazar derecha ganado = finJuego(d_matrizSalida); break; } movimientos++; copiarMatriz(h_matrizResultado, h_matriz); cout << "+------------------------------------------------------------+" << endl; printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas); cout << "+------------------------------------------------------------+" << endl; pintarMatriz(h_matriz); if (!seguirJugando && vidas > 1) { cout << "+---------------------------------------------------------------------------------------------+" << endl; cout << "| No hay mas movimientos posibles, la maquina ha perdido. Hemos suspendido el test de Turing. 
|" << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; vidas -= 1; cout << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; cout << "| Lo intentamos de nuevo (si/no)?. |" << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; string otraVez; cin >> otraVez; if (otraVez == "no") { cout << "Hasta la vista, Baby. " << endl; exit(0); } else if(otraVez == "si") { rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); movimientos = 0; seguirJugando = true; } } else if (ganado) { cout << endl << "LA MAQUINA HA GANADO VIVA TURING " << endl; exit(0); } // Sleep chungo de C++. Cambiar el 100 por lo que se quiera //this_thread::sleep_for(chrono::milliseconds(100)); // Si se quiere avanzar con enters descomentar esto: //system("PAUSE"); } cout << "A la maquina no le quedan vidas. Fin de juego. Adios Terminator. " << endl; exit(0); } __host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida) { // Muestro mensaje de bienvenida cout << "+----------------------------------------------------+" << endl; cout << "|Hola amigo bienvenido al 16384 que ganitas de jugar |" << endl; cout << "+----------------------------------------------------+" << endl; cout << endl; // Muestro caractersticas de la tarjeta cout << "+----------------------------------------------------+" << endl; caracteristicasTarjeta(); cout << "+----------------------------------------------------+" << endl; // Variables de control y estados iniciales: int movimientos = 0; // Contador de movimientos por partida int puntuacion = 0; // Puntuacin total vidas = 5; // Establezco vidas a 5. char entrada1, entrada2; // Carcteres de lectura por teclado bool correcto = false; // Variable control de entrada bool puedeSeguirJugando = false; // An hay movimientos disponibles bool ganado = false; // Si ha ganado bool haGanadoYQuiereSeguir = false; // Comprobacion por si quiere seguir jugando despues de ganar // Recojo nombre de usuario string nombre; cout << "+----------------------------------------------------+" << endl; cout << "| Dame tu nombre amiguete: |" << endl; cout << "+----------------------------------------------------+" << endl; cin >> nombre; cout << endl; // Cargo (o no) la partida cout << "+----------------------------------------------------+" << endl; cout << "| Quieres cargar tu partida? |" << endl; cout << "+----------------------------------------------------+" << endl; string cargar; cin >> cargar; // Si quiere cargar y existe la partida, la cargo. if (cargar == "si" && leerMatriz(h_matriz, nombre, &movimientos, &puntuacion)) { cout << "+----------------------------------------------------+" << endl; cout << "| Partida cargada. |" << endl; cout << "+----------------------------------------------------+" << endl; } // Si no, establezco matriz. else { inicializarMatriz(h_matriz); } // Juego: while (true) { // Imprimo matriz y estadsticas system("CLS"); cout << "+------------------------------------------------------------+" << endl; printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas); cout << "+------------------------------------------------------------+" << endl; pintarMatriz(h_matriz); // Tengo que volver a comprobar la entrada. 
correcto = true; // Las teclas de movimiento hacen input de dos caracteres, // siendo el segundo el que nos importa para el movimiento entrada1 = getch(); // Si el usuario quiere salir, se sale. if (entrada1 == 's') break; else { // Obtengo segundo caracter entrada2 = getch(); // Realizo jugada: switch (entrada2) { // PARAMETROS DESPLAZAR_MATRIZ -> matriz inicial, matriz resultado, puntuacion, desplazamiento eje y, desplazamiento eje x case 72: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP); // Desplazar arriba ganado = finJuego(d_matrizSalida); break; case 80: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP); // Desplazar abajo ganado = finJuego(d_matrizSalida); break; case 75: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG); // Desplazar izquierda ganado = finJuego(d_matrizSalida); break; case 77: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS); // Desplazar derecha ganado = finJuego(d_matrizSalida); break; default: cout << "Caracter incorrecto. " << endl; correcto = false; } } // Tras hacer la jugada, compruebo el estado de la matriz. if (correcto) { // Copio resultado a matriz: copiarMatriz(h_matrizResultado, h_matriz); // Incremento movimientos movimientos++; // Si pierde y le quedan vidas y no estaba farmeando puntos. if (!puedeSeguirJugando && vidas > 1 && !haGanadoYQuiereSeguir) { // Resto una vida vidas -= 1; // Muestro mensaje por pantalla: cout << "+---------------------------------------------------------------------------+" << endl; cout << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo. |" << endl; cout << "| Te quedan: " << vidas << " vidas. | " << endl; cout << "+---------------------------------------------------------------------------+" << endl; // Recojo si quiere seguir jugando: string otraVez; do { cout << "+---------------------------------------------------------------------------+" << endl; cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl; cout << "+---------------------------------------------------------------------------+" << endl; cin >> otraVez; }while (!(otraVez == "si") || !(otraVez != "no")); // Si no quiere seguir jugando, se sale. if (otraVez == "no") { cout << "Nos vemos amigo. " << endl; exit(0); } // Si se quiere seguir jugando, se resetean datos. else { rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); movimientos = 0; ganado = false; haGanadoYQuiereSeguir = false; inicializarMatriz(h_matriz); } } // Si pierde y no le quedan vidas y no estaba farmeando puntos. else if (!puedeSeguirJugando && vidas == 1 && !haGanadoYQuiereSeguir) { vidas -= 1; cout << endl << "No hay mas movimientos posibles, fin del juego." << endl; cout << endl << "Adems no te quedan vidas." << endl; cout << "Esta es tu puntuacion final: " << puntuacion << endl; exit(0); } // Si haba ganado y ahora ya no puede seguir else if (!puedeSeguirJugando && haGanadoYQuiereSeguir) { // Muestro mensaje por pantalla: cout << "+---------------------------------------------------------------------------+" << endl; cout << endl << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo." << endl; cout << endl << "| Te quedan: " << vidas << " vidas. 
" << endl; cout << "+----------------------------------------------------------------------------+" << endl; // Recojo si quiere seguir jugando: string otraVez; do { cout << "+---------------------------------------------------------------------------------------------+" << endl; cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; cin >> otraVez; } while (otraVez != "si" || otraVez != "no"); // Si no quiere seguir jugando, se sale. if (otraVez == "no") { cout << "Nos vemos amigo. " << endl; exit(0); } // Si se quiere seguir jugando, se resetean datos. else { rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); movimientos = 0; ganado = false; haGanadoYQuiereSeguir = false; inicializarMatriz(h_matriz); } } // Si acaba de ganar else if (ganado && !haGanadoYQuiereSeguir) { cout << "+---------------------------------------------------------------------------+" << endl; cout << "| Felicidades campeon, has ganado. Esta es tu puntuacion final: " << puntuacion << endl; cout << "+---------------------------------------------------------------------------+" << endl; string jugarMas; while (!(jugarMas == "si") && !(jugarMas == "no")) { cout << endl << "Quieres seguir jugando?" << endl; cin >> jugarMas; } if (jugarMas == "no") { cout << "Hasta luego!" << endl; exit(0); } else { haGanadoYQuiereSeguir = true; } } } } // Guardar partida cout << "Quieres guardar partida? " << endl; string entrada; cin >> entrada; if (entrada == "si") { escribirMatriz(h_matriz, nombre, &movimientos, &puntuacion); cout << "Matriz guardada con nombre: " + nombre << endl; } } // ----------- FUNCIONES DEVICE ----------- // __device__ int getElemento(int *d_matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, devuelve el elemento en [fila][columna] */ { return d_matriz[fila * d_dimMatriz->numColumnas + columna]; } __device__ void setElemento(int *d_matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, escribe el elemento en [fila][columna] */ { d_matriz[fila * d_dimMatriz->numColumnas + columna] = elemento; } // --------- KERNELS PRINCIPALES ----------- // __global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz) /* Dada una matriz a copiar, se pega todo el contenido de esta en la matriz a pegar. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Copio: int elemento_copiar = getElemento(d_matrizCopia, fila, columna, d_dimMatriz); // pego setElemento(d_matrizPega, fila, columna, elemento_copiar, d_dimMatriz); } __global__ void kernelSuma(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz) /* Dada una matriz de entrada y una de salida, escribe las sumas por desplazamiento en la matriz de salida. 
*/ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Variables auxiliares para comprobaciones int ultimaPosicion, desplazamiento, posicionActual; bool esVertical; // Analizo que tipo de movimiento se esta haciendo if (*despVertical != 0) { // Si es vertical, ajusto parmetros: posicionActual = fila; desplazamiento = fila; esVertical = true; if (*despVertical == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numFilas - 1; } else { // Si es horizontal, ajusto parmetros posicionActual = columna; desplazamiento = columna; esVertical = false; if (*despHorizontal == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numColumnas - 1; } // Obtengo el elemento en la posicion int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz); // Variable que controla si se multiplicare elm. x2 o no. bool multiplicarem = false; // Si no soy un 0: if (elemento != 0 && posicionActual != ultimaPosicion) { // Compruebo paridad de los elementos en la direccin en la que me desplazo. int paridad = 1; // Casilla que compruebo en el bucle. int casilla; // Mientras no se encuentre un elemento distinto o se sobrepase la matriz do { // Casilla estudiada if (esVertical) casilla = getElemento(d_matrizEntrada, desplazamiento + *despVertical, columna, d_dimMatriz); else casilla = getElemento(d_matrizEntrada, fila, desplazamiento + *despHorizontal, d_dimMatriz); // Si es diferente al elemento y no es 0, rompemos el bucle. if (casilla != elemento && casilla != 0) { break; } // Si hay otro elemento igual encima, aumento paridad if (casilla == elemento) { paridad += 1; } // Y sigo viendo desplazamiento += *despHorizontal + *despVertical; } while (desplazamiento != ultimaPosicion); // Si hay pares, pongo mult. a true. if (paridad % 2 == 0) { multiplicarem = true; } // Espero a todos los hilos __syncthreads(); // Si debo multiplicar, multiplico if (multiplicarem) { // Encuentro la pos. del elemento a mul * 2 int casilla; desplazamiento = posicionActual; // Reseteamos el desplazamiento // Mientras haya 0s me desplazo. do { desplazamiento += *despHorizontal + *despVertical; if (esVertical) casilla = getElemento(d_matrizEntrada, desplazamiento, columna, d_dimMatriz); else casilla = getElemento(d_matrizEntrada, fila, desplazamiento, d_dimMatriz); } while (casilla != elemento); // Sumo la puntuacion parcial que ha obtenido cada hilo con una suma atomica atomicAdd(d_puntuacion, elemento * 2); // Duplico el elemento que tengo encima if (esVertical) setElemento(d_matrizSalida, desplazamiento, columna, elemento * 2, d_dimMatriz); else setElemento(d_matrizSalida, fila, desplazamiento, elemento * 2, d_dimMatriz); } // Si no, me escribo a mi mismo en la matriz de salida. else { setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz); } // Espero a que todos los hilos multipliquen. __syncthreads(); } else { setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz); } // Espero a que finalicen los hilos. __syncthreads(); } __global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, setea todas sus posiciones a 0. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento en la posicin setElemento(matriz, fila, columna, 0, d_dimMatriz); // Espero a que el resto de hilos pongan 0s. 
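// Note: __syncthreads() only synchronizes the threads of one block; ordering across
// the whole grid is enforced on the host side, which calls hipDeviceSynchronize()
// between kernel launches.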
__syncthreads(); } __global__ void kernelDesplazar(int *d_matrizEntrada, int *d_matrizSalida, int* despVertical, int* despHorizontal, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, desplaza sus elementos 1 vez en la direccin indicada, si se puede. */ { // Encuentro posicion y elemento de mi bloque: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz); int ultimaPosicion, posicionActual; // Analizo que tipo de movimiento se esta haciendo if (*despVertical != 0) { posicionActual = fila; if (*despVertical == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numFilas - 1; } else { posicionActual = columna; if (*despHorizontal == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numColumnas - 1; } // Variable que dice si se debe mover o no. bool desplazarem = false; // Si soy distinto de 0 y no estoy en el limite if ((posicionActual != ultimaPosicion) && (elemento != 0)) { // Si la casilla siguiente a la ma en el movimiento es un 0, desplazar hacia esa direccin. int casillaVecina = getElemento(d_matrizEntrada, fila + *despVertical, columna + *despHorizontal, d_dimMatriz); if (casillaVecina == 0) { desplazarem = true; } // Espero a que marquen el resto de hilos. __syncthreads(); // Y desplazo: if (desplazarem) { //printf("Soy [%d][%d] (%d) y me desplazo. \n", fila, columna, elemento); setElemento(d_matrizSalida, fila + *despVertical, columna + *despHorizontal, elemento, d_dimMatriz); } // O escribo mi valor. else { //printf("Soy [%d][%d] (%d) y NO me desplazo. \n", fila, columna, elemento); setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz); } // Espero resto de hilos: __syncthreads(); } // Si estoy en el limite else if (elemento != 0) { //printf("Soy [%d][%d] (%d) y NO me desplazo pq estoy al limite o soy un 0. \n", fila, columna, elemento); setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz); } // Si no, soy un cero y no tengo que escribir nada porque d_matrizSalida es una matriz de 0s. // Espero al resto de hilos __syncthreads(); } // -------- KERNELS COMPROBADORES ---------- // __global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz) /* Dadas dos matrices, deja sonIguales a true si lo son. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento min & mout: int elemento1 = getElemento(d_matrizUno, fila, columna, d_dimMatriz); int elemento2 = getElemento(d_matrizDos, fila, columna, d_dimMatriz); if (elemento1 != elemento2) *d_sonIguales = false; // Espero al resto de hilos: __syncthreads(); } __global__ void kernelComprobarLlena(int *d_matriz, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz) /* Dadas una matriz, pone estaLlena a false si hay algn 0 y, por tanto, no est llena. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento min & mout: int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz); if (elemento == 0) *d_estaLlena = false; // Espero al resto de hilos: __syncthreads(); } __global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz) /* Dadas una matriz, pone estaLlena a false si hay algn 0 y, por tanto, no est llena. 
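Concretely, this kernel scans the matrix and sets *d_haGanado to true as soon as any
element equals OBJETIVO (the winning tile).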
*/ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento min & mout: int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz); if (elemento == OBJETIVO) *d_haGanado = true; // Espero al resto de hilos: __syncthreads(); } __global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz) /* Comprueba si hay elementos posibles, si los hay, devuelve true. Si no hay movimientos posibles, devuelve false */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz); bool seguirJugando_aux; // Booleano auxiliar para no escribir en el parametro directamente // Booleanos para ver donde en que direccion podemos movernos bool comprobarArr = true, comprobarAb = true, comprobarIzq = true, comprobarDer = true; // Booleanos para comprobar los elementos con los que no podemos combinarnos bool combinarArr = false, combinarAb = false, combinarIzq = false, combinarDer = false; // Comprobamos en que posicion estamos para no salirnos fuera de los rangos de la matriz if (fila == 0) comprobarArr = false; else if (fila == d_dimMatriz->numFilas - 1) comprobarAb = false; if (columna == 0) comprobarIzq = false; else if (columna == d_dimMatriz->numColumnas - 1) comprobarDer = false; int elementoEstudiado; if (comprobarArr) { elementoEstudiado = getElemento(d_matriz, fila - 1, columna, d_dimMatriz); if (elementoEstudiado == elemento) combinarArr = true; } if (comprobarAb) { elementoEstudiado = getElemento(d_matriz, fila + 1, columna, d_dimMatriz); if (elementoEstudiado == elemento) combinarAb = true; } if (comprobarDer) { elementoEstudiado = getElemento(d_matriz, fila, columna + 1, d_dimMatriz); if (elementoEstudiado == elemento) combinarDer = true; } if (comprobarIzq) { elementoEstudiado = getElemento(d_matriz, fila, columna - 1, d_dimMatriz); if (elementoEstudiado == elemento) combinarIzq = true; } seguirJugando_aux = combinarArr || combinarAb || combinarIzq || combinarDer; if (seguirJugando_aux) *seguirJugando = seguirJugando_aux; } // -------- FUNCIONES AUX HOST ----------- // __host__ void leerParametros(int argc, const char* argv[]) /* Parsea los parmetros introducidos en la llamada al programa por consola, seteando las variables del juego. */ { if ((argc != 5) || ((argv[1][0] != 'a') && (argv[1][0] != 'm')) || ((argv[2][0] != 'f') && (argv[2][0] != 'd'))) { cout << "Error en la introduccion de parametros, los parametros son:\nautomatico/manual (a/m), facil/dificil (f/d), num_filas, num_columnas\n\nUso = nombreprograma a/m f/d num_filas num_columnas\n" << endl; exit(1); } else { dimMatriz.numFilas = atoi(argv[3]); dimMatriz.numColumnas = atoi(argv[4]); if (dimMatriz.numFilas != dimMatriz.numColumnas) { cout << "El numero de filas y de columnas no puede ser distinto, crack." 
<< endl; exit(2); } bytesMatriz = atoi(argv[3]) * atoi(argv[4]) * sizeof(int); // Se dimensionan los hilos y los grids de bloques if (dimMatriz.numFilas % 2 == 0) { dim3 bloques(2, 2); dim3 hilos(dimMatriz.numFilas / 2, dimMatriz.numColumnas / 2); dimGrid = bloques; dimBlock = hilos; } else { dim3 bloques(1, 1); dim3 hilos(dimMatriz.numFilas, dimMatriz.numColumnas); dimGrid = bloques; dimBlock = hilos; } if (argv[1][0] == 'a') automatico = true; else automatico = false; if (argv[2][0] == 'f') modoDiablo = false; else modoDiablo = true; } } __host__ void pintarMatriz(int *h_matriz) /* Dada una matriz, la dibuja por pantalla. */ { HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE); for (size_t i = 0; i < dimMatriz.numColumnas; i++) { SetConsoleTextAttribute(hConsole, 14); cout << ("+-------"); } cout << "+" << endl; for (int i = 0; i < dimMatriz.numColumnas; i++) { for (int j = 0; j < dimMatriz.numFilas; j++) { // La funcion de print evalua en su interior si deberia poner un /t o no, dependiendo de la longitud del numero printf("[%d%s]", *(h_matriz + i * dimMatriz.numColumnas + j), *(h_matriz + i * dimMatriz.numColumnas + j) % 100000 == *(h_matriz + i * dimMatriz.numColumnas + j) ? "\t" : ""); } printf("\n"); } for (size_t i = 0; i < dimMatriz.numColumnas; i++) { cout << ("+-------"); } cout << "+" << endl; SetConsoleTextAttribute(hConsole, 15); } __host__ void caracteristicasTarjeta() /* Saca por pantalla las caracteristicas de todas las tarjetas graficas del pc */ { // Recojo el nmero de tarjetas de la grfica int numTarjetas; hipGetDeviceCount(&numTarjetas); // Para cada una, imprimo sus caractersticas for (int i = 0; i < numTarjetas; i++) { hipDeviceProp_t caracteristicas; hipGetDeviceProperties(&caracteristicas, i); printf("Numero de dispositivo: %d\n", i); printf(" Nombre del dispositivo: %s\n", caracteristicas.name); printf(" Frecuencia del reloj de memoria (KHz): %d\n", caracteristicas.memoryClockRate); printf(" Interfaz de memoria (bits): %d\n", caracteristicas.memoryBusWidth); printf(" Ancho de banda de memoria (GB/s): %f\n", 2.0*caracteristicas.memoryClockRate*(caracteristicas.memoryBusWidth / 8) / 1.0e6); } } // ------- OP. CON MATRIZ EN HOST ------- // __host__ void inicializarMatriz(int *h_matriz) /* Dada una matriz, la rellena con 0s, 2s, 4s u 8s, aleatoriamente y dependiendo del nivel de dificultad elegido. */ { srand(time(NULL)); // Contador de casillas rellenadas. Dependiendo de la dificultad, tiene un tope distinto. 
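// In "modo diablo" the board is seeded with up to 8 tiles drawn from {2, 4}; in the
// normal mode it is seeded with up to 15 tiles drawn from {2, 4, 8}, never exceeding
// the number of cells on the board.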
int contadorSemillas = 0; int *posicionAleatoria; if (modoDiablo) { int array_posibles_numeros[] = { 2,4 }; while ((contadorSemillas < 8) && (contadorSemillas < dimMatriz.numFilas * dimMatriz.numColumnas)) // Mientras no se hayan lanzado todas las semillas { posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez { *posicionAleatoria = array_posibles_numeros[rand() % 2]; // Cambio ese cero por un numero aleatorio entre los candidatos (2 o 4) contadorSemillas++; // Sumo uno al contador de semillas } } } else { int array_posibles_numeros[] = { 2,4,8 }; while ((contadorSemillas < 15) && (contadorSemillas < dimMatriz.numFilas * dimMatriz.numColumnas)) // Mientras no se hayan lanzado todas las semillas { posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez { *posicionAleatoria = array_posibles_numeros[rand() % 3]; // Cambio ese cero por un numero aleatorio entre los candidatos (2, 4 u 8) contadorSemillas++; // Sumo uno al contador de semillas } } } } __host__ void rellenarMatrizconcero(int *h_matriz) /* Dada una matriz, la rellena con 0s. */ { for (int i = 0; i < dimMatriz.numColumnas; ++i) { for (int j = 0; j < dimMatriz.numFilas; ++j) { *(h_matriz + i * dimMatriz.numColumnas + j) = 0; } } } __host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega) /* Copia matriz de copia en matriz de pega. */ { // Punteros a matrices en DEVICE: int *d_matrizCopia; int *d_matrizPega; dimensionesMatriz* d_dimMatriz; // Reservo memoria en DEVICE: hipMalloc((void **)&d_matrizCopia, bytesMatriz); hipMalloc((void **)&d_matrizPega, bytesMatriz); hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); // Muevo matrices de HOST a DEVICE: hipMemcpy(d_matrizCopia, h_matrizCopia, bytesMatriz, hipMemcpyHostToDevice); hipMemcpy(d_matrizPega, h_matrizPega, bytesMatriz, hipMemcpyHostToDevice); hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice); // Primero, copio salida a entrada. kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizCopia, d_matrizPega, d_dimMatriz); hipDeviceSynchronize(); // Despus, pongo a 0 la matriz de copia. 
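// Zeroing the source buffer here resets h_matrizResultado for the next move, since
// the game loops call copiarMatriz(h_matrizResultado, h_matriz) after every play.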
kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizCopia, d_dimMatriz); hipDeviceSynchronize(); // Devolvemos resultado de DEVICE a HOST: hipMemcpy(h_matrizPega, d_matrizPega, bytesMatriz, hipMemcpyDeviceToHost); hipMemcpy(h_matrizCopia, d_matrizCopia, bytesMatriz, hipMemcpyDeviceToHost); // Libero memoria de DEVICE: hipFree(d_matrizPega); hipFree(d_matrizCopia); hipFree(d_dimMatriz); } __host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int* d_matrizEntrada, int* d_matrizSalida, int* h_puntuacion, int despVertical, int despHorizontal) { int* d_despVertical = 0; int* d_despHorizontal = 0; int* d_puntuacion = 0; dimensionesMatriz* d_dimMatriz; // Reservo memoria en DEVICE: hipMalloc((void **)&d_despVertical, sizeof(int)); hipMalloc((void **)&d_despHorizontal, sizeof(int)); hipMalloc((void **)&d_puntuacion, sizeof(int)); hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); // Muevo matrices de HOST a DEVICE: hipMemcpy(d_matrizEntrada, h_matrizEntrada, bytesMatriz, hipMemcpyHostToDevice); hipMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, hipMemcpyHostToDevice); hipMemcpy(d_puntuacion, h_puntuacion, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice); hipMemcpy(d_despVertical, &despVertical, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_despHorizontal, &despHorizontal, sizeof(int), hipMemcpyHostToDevice); // Realizo la suma: kernelSuma << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_puntuacion, d_despVertical, d_despHorizontal, d_dimMatriz); // Espero a que termine de operar: hipDeviceSynchronize(); hipMemcpy(h_puntuacion, d_puntuacion, sizeof(int), hipMemcpyDeviceToHost); // Variable que dice si las matrices son iguales o no. bool h_iguales = true; bool *d_iguales; hipMalloc((void **)&d_iguales, sizeof(bool)); // Mientras la matriz de entrada sea distinta de salida, // significa que puedo seguir desplazando. // Cuando sean iguales, detengo el bucle. do { // Primero, copio salida a entrada. kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_dimMatriz); hipDeviceSynchronize(); // Segundo, seteo salida a 0. kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizSalida, d_dimMatriz); hipDeviceSynchronize(); // Desplazo kernelDesplazar << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_despVertical, d_despHorizontal, d_dimMatriz); hipDeviceSynchronize(); // Compruebo si tengo que seguir desplazando. // Doy por hecho que son iguales. Si no lo son, desplazare. h_iguales = true; // Muevo a device. hipMemcpy(d_iguales, &h_iguales, sizeof(bool), hipMemcpyHostToDevice); // Veo si son iguales. kernelComprobarIguales << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_iguales, d_dimMatriz); hipDeviceSynchronize(); // Limpio memoria tras trastear con d_iguales. hipMemcpy(&h_iguales, d_iguales, sizeof(bool), hipMemcpyDeviceToHost); } while (!h_iguales); hipFree(d_iguales); // Compruebo si la matriz est llena y si se puede mover en cualq. 
direccin bool h_movimientosPosibles = true; // Devolvemos resultado de DEVICE a HOST: hipMemcpy(h_matrizSalida, d_matrizSalida, bytesMatriz, hipMemcpyDeviceToHost); // Si esta llena compruebo si hay movimientos posibles if (estaLlena(d_matrizSalida)) h_movimientosPosibles = movimientosPosibles(d_matrizSalida); // Si no, aado una nueva semilla a la matriz resultante en host else { nuevaSemilla(h_matrizSalida, 1); // Aadimos la nueva semilla // Comprobamos si con la nueva semilla anadida, hemos perdido hipMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, hipMemcpyHostToDevice); if (estaLlena(d_matrizSalida)) h_movimientosPosibles = movimientosPosibles(d_matrizSalida); } // Libero memoria de DEVICE: hipFree(d_despVertical); hipFree(d_despHorizontal); hipFree(d_dimMatriz); return h_movimientosPosibles; } __host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento) /* Dada una matriz, escribe el elemento en [fila][columna] */ { h_matriz[fila * dimMatriz.numColumnas + columna] = elemento; } __host__ void nuevaSemilla(int *h_matriz, int numSemillas) /* Crea numSemillas nuevas semillas en la matriz almacenada en device */ { int *posicionAleatoria; bool semillaGenerada = false; if (modoDiablo) { int array_posibles_numeros[] = { 2,4 }; while ((!semillaGenerada) && (numSemillas != 0)) // Mientras no se haya encontrado una posicion con 0 y no se hallan lanzado todas las semillas { posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez { *posicionAleatoria = array_posibles_numeros[rand() % 2]; // Cambio ese cero por un numero aleatorio entre los candidatos (2 o 4) semillaGenerada = true; numSemillas--; } } } else { int array_posibles_numeros[] = { 2,4,8 }; while ((!semillaGenerada) && (numSemillas != 0)) // Mientras no se haya encontrado una posicion con 0 y no se hayan lanzado todas las semillas { posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez { *posicionAleatoria = array_posibles_numeros[rand() % 3]; // Cambio ese cero por un numero aleatorio entre los candidatos (2, 4 u 8) semillaGenerada = true; numSemillas--; } } } } // ------- COMPROBACIONES EN HOST ------- // __host__ bool estaLlena(int* d_matriz) { // Compruebo si la matriz esta llena bool h_estaLlena = true; bool *d_estaLlena; dimensionesMatriz* d_dimMatriz; hipMalloc((void **)&d_estaLlena, sizeof(bool)); hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); hipMemcpy(d_estaLlena, &h_estaLlena, sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice); // Veo si est llena. kernelComprobarLlena << < dimGrid, dimBlock >> > (d_matriz, d_estaLlena, d_dimMatriz); hipDeviceSynchronize(); hipMemcpy(&h_estaLlena, d_estaLlena, sizeof(bool), hipMemcpyDeviceToHost); // Limpio memoria tras trastear con d_estaLlena. 
hipFree(d_estaLlena); hipFree(d_dimMatriz); return h_estaLlena; } __host__ bool finJuego(int* d_matriz) { // Compruebo si la matriz contiene algn 16384 bool h_haGanado = false; bool *d_haGanado; dimensionesMatriz* d_dimMatriz; hipMalloc((void **)&d_haGanado, sizeof(bool)); hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); hipMemcpy(d_haGanado, &h_haGanado, sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice); // Veo si est llena. kernelComprobarSiHaGanado << < dimGrid, dimBlock >> > (d_matriz, d_haGanado, d_dimMatriz); hipDeviceSynchronize(); hipMemcpy(&h_haGanado, d_haGanado, sizeof(bool), hipMemcpyDeviceToHost); // Limpio memoria tras trastear con d_estaLlena. hipFree(d_haGanado); hipFree(d_dimMatriz); return h_haGanado; } __host__ bool movimientosPosibles(int* d_matriz) /* Llama al kernel de comprobacion de movimientos posibles */ { bool h_movimientosPosibles = false; dimensionesMatriz* d_dimMatriz; bool *d_movimientosPosibles; hipMalloc((void **)&d_movimientosPosibles, sizeof(bool)); hipMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); hipMemcpy(d_movimientosPosibles, &h_movimientosPosibles, sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), hipMemcpyHostToDevice); // Compruebo si hay movimientos que se puedan hacer kernelComprobarMovimientosPosibles << < dimGrid, dimBlock >> > (d_matriz, d_movimientosPosibles, d_dimMatriz); hipDeviceSynchronize(); // Paso el booleano a memoria del host y libero la memoria de device hipMemcpy(&h_movimientosPosibles, d_movimientosPosibles, sizeof(bool), hipMemcpyDeviceToHost); hipFree(d_dimMatriz); hipFree(d_movimientosPosibles); return h_movimientosPosibles; } // ----- GUARDADO Y LECTURA ----- // __host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos) { FILE *archivo; // Preparo nombre: nombreJugador += ".txt"; char * nombreArchivo = new char[nombreJugador.length() + 1]; strcpy(nombreArchivo, nombreJugador.c_str()); // Abro archivo: archivo = fopen(nombreArchivo, "w"); if (archivo == NULL) { cout << "Error escribiendo partida. " << endl; } else { fprintf(archivo, "%d\n", dimMatriz.numFilas); fprintf(archivo, "%d\n", dimMatriz.numColumnas); fprintf(archivo, "%d\n", vidas); fprintf(archivo, "%d\n", *movimientos); fprintf(archivo, "%d\n", *puntuacion); for (int i = 0; i < dimMatriz.numColumnas; ++i) { for (int j = 0; j < dimMatriz.numFilas; ++j) { fprintf(archivo, "%d ", *(h_matriz + i * dimMatriz.numColumnas + j)); } fprintf(archivo, "\n"); } } fclose(archivo); } __host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos) { // Cargo el archivo ifstream in(nombreJugador + ".txt"); bool lecturaCorrecta = true; // Si error if (!in) { cout << "Erro abriendo el archivo. La partida no existe, se iniciara una partida nueva." << endl; lecturaCorrecta = false; } // Si no, escribo matriz else { int a_filas, a_columnas; in >> a_filas; in >> a_columnas; in >> vidas; if (a_filas != dimMatriz.numFilas || a_columnas != dimMatriz.numColumnas) { cout << "La partida cargada no es congruente con el numero de filas/columnas pasada como parametro." << endl; cout << "Se iniciara una partida nueva." 
<< endl; lecturaCorrecta = false; } else { // Cargo movimientos y puntuacion in >> *movimientos; in >> *puntuacion; for (int fila = 0; fila < dimMatriz.numFilas; fila++) { for (int columna = 0; columna < dimMatriz.numColumnas; columna++) { // Parseo el numero int num; in >> num; // Lo escribo en la posicion setElementoHost(h_matriz, fila, columna, num); } } } } // Cierro archivo in.close(); return lecturaCorrecta; }
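// ---------------------------------------------------------------------------
// Minimal, self-contained sketch (an illustration, not part of the original game
// files): it isolates the launch-configuration rule used by leerParametros (a 2x2
// grid of (n/2 x n/2)-thread blocks when the board side is even, a single n x n
// block otherwise) together with a board-clearing kernel equivalent to
// kernelSetMatrizCeros. The names clearBoard, n and d_board are illustrative and
// do not come from the original code.
// ---------------------------------------------------------------------------
#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void clearBoard(int *board, int n)
{
    int fila    = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    if (fila < n && columna < n)        // bounds guard; the game relies on an exact fit instead
        board[fila * n + columna] = 0;
}

int main()
{
    const int n = 4;                    // board side (the game only allows square boards)
    int *d_board = nullptr;
    hipMalloc((void **)&d_board, n * n * sizeof(int));

    // Same dimensioning rule as leerParametros:
    dim3 dimGrid, dimBlock;
    if (n % 2 == 0) { dimGrid = dim3(2, 2); dimBlock = dim3(n / 2, n / 2); }
    else            { dimGrid = dim3(1, 1); dimBlock = dim3(n, n); }

    clearBoard<<<dimGrid, dimBlock>>>(d_board, n);
    hipDeviceSynchronize();

    int h_board[n * n];
    hipMemcpy(h_board, d_board, sizeof(h_board), hipMemcpyDeviceToHost);
    printf("board[0][0] after clearing: %d\n", h_board[0]);   // expected 0
    hipFree(d_board);
    return 0;
}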
25bd157b4ff6c936d6e2425ac1e98e3ca6271998.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <conio.h> #include <time.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <string> #include <chrono> #include <thread> #include <Windows.h> using namespace std; #define OBJETIVO 2048 #define DESP_POS 1 #define DESP_NEG -1 #define NO_DESP 0 #ifdef __INTELLISENSE__ void __syncthreads(); // Para evitar el error que da el intellisense con syncthreads y atomicadd void atomicAdd(int *a, int b); #endif // Variables globales para recoger por parámetros struct dimensionesMatriz { int numFilas; int numColumnas; } dimMatriz; dim3 dimGrid; // Grid de bloques dim3 dimBlock; // Hilos por bloque // Variables de control // Juego automático o manual bool automatico; // Nº de bytes que ocupa la matriz int bytesMatriz; // Dificultad del juego bool modoDiablo; // Control de vidas: int vidas; // Funciones de juego __host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida); __host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida); // Funciones auxiliares en Device __device__ int getElemento(int *matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz); __device__ void setElemento(int *matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz); // Kernels // Kernel movimiento __global__ void kernelDesplazar(int *h_matrizEntrada, int *h_matrizSalida, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz); // Kernels auxiliares __global__ void kernelSuma(int *h_matrizEntrada, int *h_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz); __global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz); __global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz); // Funciones auxiliares de comprobación de estado de la matriz __global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz); __global__ void kernelComprobarLlena(int *d_matrizUno, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz); __global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz); __global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz); // Funciones auxiliares en Host __host__ void caracteristicasTarjeta(); __host__ void leerParametros(int argc, const char* argv[]); // Operaciones con matrices __host__ void inicializarMatriz(int *h_matriz); __host__ void rellenarMatrizconcero(int *h_matriz); __host__ void pintarMatriz(int *h_matriz); __host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega); __host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento); __host__ void nuevaSemilla(int *h_matriz, int numSemillas); // Comprobadores __host__ bool estaLlena(int* d_matriz); __host__ bool finJuego(int* d_matriz); __host__ bool movimientosPosibles(int* d_matriz); // Funciones de host de carga y guardado de matrices: __host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos); __host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos); // Funcion de movimiento en Host __host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int *d_matrizEntrada, int *d_matrizSalida, int *h_puntuacion, int 
despVertical, int despHorizontal); // MAIN int main(int argc, const char* argv[]) { leerParametros(argc, argv); // Declaracion de matrices en host: int* h_matriz = (int *)malloc(bytesMatriz); int* h_matrizResultado = (int *)malloc(bytesMatriz); // Punteros a matrices en DEVICE: int *d_matrizEntrada; int *d_matrizSalida; // Reserva de memoria en DEVICE cudaMalloc((void **)&d_matrizEntrada, bytesMatriz); cudaMalloc((void **)&d_matrizSalida, bytesMatriz); // Relleno las matrices con 0s: rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); if (automatico) juegoAutomatico(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida); else juegoManual(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida); // Libero la memoria de device cudaFree(d_matrizEntrada); cudaFree(d_matrizSalida); return 0; } // ----------- MODOS DE JUEGO ----------- // __host__ void juegoAutomatico(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida) { cout << "+--------------------------------------------------------+" << endl; cout << "| Bienvenido al 16384, se ha elegido el modo automatico. |" << endl; cout << "+--------------------------------------------------------+" << endl; inicializarMatriz(h_matriz); // Se comprueban las caracteristicas de la tarjeta cout << "+--------------------------------------------------------+" << endl; caracteristicasTarjeta(); cout << "+--------------------------------------------------------+" << endl; cout << endl; system("pause"); system("cls"); // Contador de movimientos int movimientos = 0; int puntuacion = 0; vidas = 5; // Variable control de entrada bool seguirJugando = false; bool ganado = false; while (!ganado && vidas > 0) { // Eligo un movimiento aleatorio int movimiento = rand() % 4; system("CLS"); // Y lo hago switch (movimiento) { // PARAMETROS DESPLAZAR_MATRIZ -> matriz inicial, matriz resultado, desplazamiento eje y, desplazamiento eje x case 0: cout << "Muevo arriba " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP); // Desplazar arriba ganado = finJuego(d_matrizSalida); break; case 1: cout << "Muevo abajo " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP); // Desplazar abajo ganado = finJuego(d_matrizSalida); break; case 2: cout << "Muevo izquierda " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG); // Desplazar izquierda ganado = finJuego(d_matrizSalida); break; case 3: cout << "Muevo derecha " << endl; seguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS); // Desplazar derecha ganado = finJuego(d_matrizSalida); break; } movimientos++; copiarMatriz(h_matrizResultado, h_matriz); cout << "+------------------------------------------------------------+" << endl; printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas); cout << "+------------------------------------------------------------+" << endl; pintarMatriz(h_matriz); if (!seguirJugando && vidas > 1) { cout << "+---------------------------------------------------------------------------------------------+" << endl; cout << "| No hay mas movimientos posibles, la maquina ha perdido. Hemos suspendido el test de Turing. 
|" << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; vidas -= 1; cout << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; cout << "| Lo intentamos de nuevo (si/no)?. |" << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; string otraVez; cin >> otraVez; if (otraVez == "no") { cout << "Hasta la vista, Baby. " << endl; exit(0); } else if(otraVez == "si") { rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); movimientos = 0; seguirJugando = true; } } else if (ganado) { cout << endl << "LA MAQUINA HA GANADO VIVA TURING " << endl; exit(0); } // Sleep chungo de C++. Cambiar el 100 por lo que se quiera //this_thread::sleep_for(chrono::milliseconds(100)); // Si se quiere avanzar con enters descomentar esto: //system("PAUSE"); } cout << "A la maquina no le quedan vidas. Fin de juego. Adios Terminator. " << endl; exit(0); } __host__ void juegoManual(int *h_matriz, int *h_matrizResultado, int *d_matrizEntrada, int *d_matrizSalida) { // Muestro mensaje de bienvenida cout << "+----------------------------------------------------+" << endl; cout << "|Hola amigo bienvenido al 16384 que ganitas de jugar |" << endl; cout << "+----------------------------------------------------+" << endl; cout << endl; // Muestro características de la tarjeta cout << "+----------------------------------------------------+" << endl; caracteristicasTarjeta(); cout << "+----------------------------------------------------+" << endl; // Variables de control y estados iniciales: int movimientos = 0; // Contador de movimientos por partida int puntuacion = 0; // Puntuación total vidas = 5; // Establezco vidas a 5. char entrada1, entrada2; // Carácteres de lectura por teclado bool correcto = false; // Variable control de entrada bool puedeSeguirJugando = false; // Aún hay movimientos disponibles bool ganado = false; // Si ha ganado bool haGanadoYQuiereSeguir = false; // Comprobacion por si quiere seguir jugando despues de ganar // Recojo nombre de usuario string nombre; cout << "+----------------------------------------------------+" << endl; cout << "| Dame tu nombre amiguete: |" << endl; cout << "+----------------------------------------------------+" << endl; cin >> nombre; cout << endl; // Cargo (o no) la partida cout << "+----------------------------------------------------+" << endl; cout << "| Quieres cargar tu partida? |" << endl; cout << "+----------------------------------------------------+" << endl; string cargar; cin >> cargar; // Si quiere cargar y existe la partida, la cargo. if (cargar == "si" && leerMatriz(h_matriz, nombre, &movimientos, &puntuacion)) { cout << "+----------------------------------------------------+" << endl; cout << "| Partida cargada. |" << endl; cout << "+----------------------------------------------------+" << endl; } // Si no, establezco matriz. else { inicializarMatriz(h_matriz); } // Juego: while (true) { // Imprimo matriz y estadísticas system("CLS"); cout << "+------------------------------------------------------------+" << endl; printf("|Movimiento: %d\tPuntuacion: %d\t Vidas: %d \n", movimientos, puntuacion, vidas); cout << "+------------------------------------------------------------+" << endl; pintarMatriz(h_matriz); // Tengo que volver a comprobar la entrada. 
correcto = true; // Las teclas de movimiento hacen input de dos caracteres, // siendo el segundo el que nos importa para el movimiento entrada1 = getch(); // Si el usuario quiere salir, se sale. if (entrada1 == 's') break; else { // Obtengo segundo caracter entrada2 = getch(); // Realizo jugada: switch (entrada2) { // PARAMETROS DESPLAZAR_MATRIZ -> matriz inicial, matriz resultado, puntuacion, desplazamiento eje y, desplazamiento eje x case 72: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_NEG, NO_DESP); // Desplazar arriba ganado = finJuego(d_matrizSalida); break; case 80: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, DESP_POS, NO_DESP); // Desplazar abajo ganado = finJuego(d_matrizSalida); break; case 75: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_NEG); // Desplazar izquierda ganado = finJuego(d_matrizSalida); break; case 77: puedeSeguirJugando = desplazarMatriz(h_matriz, h_matrizResultado, d_matrizEntrada, d_matrizSalida, &puntuacion, NO_DESP, DESP_POS); // Desplazar derecha ganado = finJuego(d_matrizSalida); break; default: cout << "Caracter incorrecto. " << endl; correcto = false; } } // Tras hacer la jugada, compruebo el estado de la matriz. if (correcto) { // Copio resultado a matriz: copiarMatriz(h_matrizResultado, h_matriz); // Incremento movimientos movimientos++; // Si pierde y le quedan vidas y no estaba farmeando puntos. if (!puedeSeguirJugando && vidas > 1 && !haGanadoYQuiereSeguir) { // Resto una vida vidas -= 1; // Muestro mensaje por pantalla: cout << "+---------------------------------------------------------------------------+" << endl; cout << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo. |" << endl; cout << "| Te quedan: " << vidas << " vidas. | " << endl; cout << "+---------------------------------------------------------------------------+" << endl; // Recojo si quiere seguir jugando: string otraVez; do { cout << "+---------------------------------------------------------------------------+" << endl; cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl; cout << "+---------------------------------------------------------------------------+" << endl; cin >> otraVez; }while (!(otraVez == "si") || !(otraVez != "no")); // Si no quiere seguir jugando, se sale. if (otraVez == "no") { cout << "Nos vemos amigo. " << endl; exit(0); } // Si se quiere seguir jugando, se resetean datos. else { rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); movimientos = 0; ganado = false; haGanadoYQuiereSeguir = false; inicializarMatriz(h_matriz); } } // Si pierde y no le quedan vidas y no estaba farmeando puntos. else if (!puedeSeguirJugando && vidas == 1 && !haGanadoYQuiereSeguir) { vidas -= 1; cout << endl << "No hay mas movimientos posibles, fin del juego." << endl; cout << endl << "Además no te quedan vidas." << endl; cout << "Esta es tu puntuacion final: " << puntuacion << endl; exit(0); } // Si había ganado y ahora ya no puede seguir else if (!puedeSeguirJugando && haGanadoYQuiereSeguir) { // Muestro mensaje por pantalla: cout << "+---------------------------------------------------------------------------+" << endl; cout << endl << "| No hay mas movimientos posibles, fin de juego. Intentalo de nuevo." << endl; cout << endl << "| Te quedan: " << vidas << " vidas. 
" << endl; cout << "+----------------------------------------------------------------------------+" << endl; // Recojo si quiere seguir jugando: string otraVez; do { cout << "+---------------------------------------------------------------------------------------------+" << endl; cout << "| Quieres intentarlo de nuevo (si/no)? |" << endl; cout << "+---------------------------------------------------------------------------------------------+" << endl; cin >> otraVez; } while (otraVez != "si" || otraVez != "no"); // Si no quiere seguir jugando, se sale. if (otraVez == "no") { cout << "Nos vemos amigo. " << endl; exit(0); } // Si se quiere seguir jugando, se resetean datos. else { rellenarMatrizconcero(h_matriz); rellenarMatrizconcero(h_matrizResultado); movimientos = 0; ganado = false; haGanadoYQuiereSeguir = false; inicializarMatriz(h_matriz); } } // Si acaba de ganar else if (ganado && !haGanadoYQuiereSeguir) { cout << "+---------------------------------------------------------------------------+" << endl; cout << "| Felicidades campeon, has ganado. Esta es tu puntuacion final: " << puntuacion << endl; cout << "+---------------------------------------------------------------------------+" << endl; string jugarMas; while (!(jugarMas == "si") && !(jugarMas == "no")) { cout << endl << "Quieres seguir jugando?" << endl; cin >> jugarMas; } if (jugarMas == "no") { cout << "Hasta luego!" << endl; exit(0); } else { haGanadoYQuiereSeguir = true; } } } } // Guardar partida cout << "Quieres guardar partida? " << endl; string entrada; cin >> entrada; if (entrada == "si") { escribirMatriz(h_matriz, nombre, &movimientos, &puntuacion); cout << "Matriz guardada con nombre: " + nombre << endl; } } // ----------- FUNCIONES DEVICE ----------- // __device__ int getElemento(int *d_matriz, int fila, int columna, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, devuelve el elemento en [fila][columna] */ { return d_matriz[fila * d_dimMatriz->numColumnas + columna]; } __device__ void setElemento(int *d_matriz, int fila, int columna, int elemento, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, escribe el elemento en [fila][columna] */ { d_matriz[fila * d_dimMatriz->numColumnas + columna] = elemento; } // --------- KERNELS PRINCIPALES ----------- // __global__ void kernelCopiarMatriz(int *d_matrizCopia, int *d_matrizPega, dimensionesMatriz* d_dimMatriz) /* Dada una matriz a copiar, se pega todo el contenido de esta en la matriz a pegar. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Copio: int elemento_copiar = getElemento(d_matrizCopia, fila, columna, d_dimMatriz); // pego setElemento(d_matrizPega, fila, columna, elemento_copiar, d_dimMatriz); } __global__ void kernelSuma(int *d_matrizEntrada, int *d_matrizSalida, int *d_puntuacion, int *despVertical, int *despHorizontal, dimensionesMatriz* d_dimMatriz) /* Dada una matriz de entrada y una de salida, escribe las sumas por desplazamiento en la matriz de salida. 
*/ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Variables auxiliares para comprobaciones int ultimaPosicion, desplazamiento, posicionActual; bool esVertical; // Analizo que tipo de movimiento se esta haciendo if (*despVertical != 0) { // Si es vertical, ajusto parámetros: posicionActual = fila; desplazamiento = fila; esVertical = true; if (*despVertical == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numFilas - 1; } else { // Si es horizontal, ajusto parámetros posicionActual = columna; desplazamiento = columna; esVertical = false; if (*despHorizontal == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numColumnas - 1; } // Obtengo el elemento en la posicion int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz); // Variable que controla si se multiplicare elm. x2 o no. bool multiplicarem = false; // Si no soy un 0: if (elemento != 0 && posicionActual != ultimaPosicion) { // Compruebo paridad de los elementos en la dirección en la que me desplazo. int paridad = 1; // Casilla que compruebo en el bucle. int casilla; // Mientras no se encuentre un elemento distinto o se sobrepase la matriz do { // Casilla estudiada if (esVertical) casilla = getElemento(d_matrizEntrada, desplazamiento + *despVertical, columna, d_dimMatriz); else casilla = getElemento(d_matrizEntrada, fila, desplazamiento + *despHorizontal, d_dimMatriz); // Si es diferente al elemento y no es 0, rompemos el bucle. if (casilla != elemento && casilla != 0) { break; } // Si hay otro elemento igual encima, aumento paridad if (casilla == elemento) { paridad += 1; } // Y sigo viendo desplazamiento += *despHorizontal + *despVertical; } while (desplazamiento != ultimaPosicion); // Si hay pares, pongo mult. a true. if (paridad % 2 == 0) { multiplicarem = true; } // Espero a todos los hilos __syncthreads(); // Si debo multiplicar, multiplico if (multiplicarem) { // Encuentro la pos. del elemento a mul * 2 int casilla; desplazamiento = posicionActual; // Reseteamos el desplazamiento // Mientras haya 0s me desplazo. do { desplazamiento += *despHorizontal + *despVertical; if (esVertical) casilla = getElemento(d_matrizEntrada, desplazamiento, columna, d_dimMatriz); else casilla = getElemento(d_matrizEntrada, fila, desplazamiento, d_dimMatriz); } while (casilla != elemento); // Sumo la puntuacion parcial que ha obtenido cada hilo con una suma atomica atomicAdd(d_puntuacion, elemento * 2); // Duplico el elemento que tengo encima if (esVertical) setElemento(d_matrizSalida, desplazamiento, columna, elemento * 2, d_dimMatriz); else setElemento(d_matrizSalida, fila, desplazamiento, elemento * 2, d_dimMatriz); } // Si no, me escribo a mi mismo en la matriz de salida. else { setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz); } // Espero a que todos los hilos multipliquen. __syncthreads(); } else { setElemento(d_matrizSalida, fila, columna, getElemento(d_matrizEntrada, fila, columna, d_dimMatriz), d_dimMatriz); } // Espero a que finalicen los hilos. __syncthreads(); } __global__ void kernelSetMatrizCeros(int *matriz, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, setea todas sus posiciones a 0. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento en la posici�n setElemento(matriz, fila, columna, 0, d_dimMatriz); // Espero a que el resto de hilos pongan 0s. 
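// kernelDesplazar (below) moves every non-zero cell by at most one position per
// launch, writing into an output matrix that has just been cleared; the host keeps
// relaunching it until input and output are identical, which is when no tile can
// slide any further.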
__syncthreads(); } __global__ void kernelDesplazar(int *d_matrizEntrada, int *d_matrizSalida, int* despVertical, int* despHorizontal, dimensionesMatriz* d_dimMatriz) /* Dada una matriz, desplaza sus elementos 1 vez en la dirección indicada, si se puede. */ { // Encuentro posicion y elemento de mi bloque: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; int elemento = getElemento(d_matrizEntrada, fila, columna, d_dimMatriz); int ultimaPosicion, posicionActual; // Analizo que tipo de movimiento se esta haciendo if (*despVertical != 0) { posicionActual = fila; if (*despVertical == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numFilas - 1; } else { posicionActual = columna; if (*despHorizontal == -1) ultimaPosicion = 0; else ultimaPosicion = d_dimMatriz->numColumnas - 1; } // Variable que dice si se debe mover o no. bool desplazarem = false; // Si soy distinto de 0 y no estoy en el limite if ((posicionActual != ultimaPosicion) && (elemento != 0)) { // Si la casilla siguiente a la mía en el movimiento es un 0, desplazaré hacia esa dirección. int casillaVecina = getElemento(d_matrizEntrada, fila + *despVertical, columna + *despHorizontal, d_dimMatriz); if (casillaVecina == 0) { desplazarem = true; } // Espero a que marquen el resto de hilos. __syncthreads(); // Y desplazo: if (desplazarem) { //printf("Soy [%d][%d] (%d) y me desplazo. \n", fila, columna, elemento); setElemento(d_matrizSalida, fila + *despVertical, columna + *despHorizontal, elemento, d_dimMatriz); } // O escribo mi valor. else { //printf("Soy [%d][%d] (%d) y NO me desplazo. \n", fila, columna, elemento); setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz); } // Espero resto de hilos: __syncthreads(); } // Si estoy en el limite else if (elemento != 0) { //printf("Soy [%d][%d] (%d) y NO me desplazo pq estoy al limite o soy un 0. \n", fila, columna, elemento); setElemento(d_matrizSalida, fila, columna, elemento, d_dimMatriz); } // Si no, soy un cero y no tengo que escribir nada porque d_matrizSalida es una matriz de 0s. // Espero al resto de hilos __syncthreads(); } // -------- KERNELS COMPROBADORES ---------- // __global__ void kernelComprobarIguales(int *d_matrizUno, int *d_matrizDos, bool* d_sonIguales, dimensionesMatriz* d_dimMatriz) /* Dadas dos matrices, deja sonIguales a true si lo son. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento min & mout: int elemento1 = getElemento(d_matrizUno, fila, columna, d_dimMatriz); int elemento2 = getElemento(d_matrizDos, fila, columna, d_dimMatriz); if (elemento1 != elemento2) *d_sonIguales = false; // Espero al resto de hilos: __syncthreads(); } __global__ void kernelComprobarLlena(int *d_matriz, bool* d_estaLlena, dimensionesMatriz* d_dimMatriz) /* Dadas una matriz, pone estaLlena a false si hay algún 0 y, por tanto, no está llena. */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento min & mout: int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz); if (elemento == 0) *d_estaLlena = false; // Espero al resto de hilos: __syncthreads(); } __global__ void kernelComprobarSiHaGanado(int *d_matriz, bool* d_haGanado, dimensionesMatriz* d_dimMatriz) /* Dadas una matriz, pone estaLlena a false si hay algún 0 y, por tanto, no está llena. 
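In practice it raises *d_haGanado when any cell reaches OBJETIVO, which is defined
as 2048 in this source even though the game presents itself as 16384.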
*/ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; // Elemento min & mout: int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz); if (elemento == OBJETIVO) *d_haGanado = true; // Espero al resto de hilos: __syncthreads(); } __global__ void kernelComprobarMovimientosPosibles(int *d_matriz, bool *seguirJugando, dimensionesMatriz* d_dimMatriz) /* Comprueba si hay elementos posibles, si los hay, devuelve true. Si no hay movimientos posibles, devuelve false */ { // Encuentro posicion: int fila = blockIdx.y * blockDim.y + threadIdx.y; int columna = blockIdx.x * blockDim.x + threadIdx.x; int elemento = getElemento(d_matriz, fila, columna, d_dimMatriz); bool seguirJugando_aux; // Booleano auxiliar para no escribir en el parametro directamente // Booleanos para ver donde en que direccion podemos movernos bool comprobarArr = true, comprobarAb = true, comprobarIzq = true, comprobarDer = true; // Booleanos para comprobar los elementos con los que no podemos combinarnos bool combinarArr = false, combinarAb = false, combinarIzq = false, combinarDer = false; // Comprobamos en que posicion estamos para no salirnos fuera de los rangos de la matriz if (fila == 0) comprobarArr = false; else if (fila == d_dimMatriz->numFilas - 1) comprobarAb = false; if (columna == 0) comprobarIzq = false; else if (columna == d_dimMatriz->numColumnas - 1) comprobarDer = false; int elementoEstudiado; if (comprobarArr) { elementoEstudiado = getElemento(d_matriz, fila - 1, columna, d_dimMatriz); if (elementoEstudiado == elemento) combinarArr = true; } if (comprobarAb) { elementoEstudiado = getElemento(d_matriz, fila + 1, columna, d_dimMatriz); if (elementoEstudiado == elemento) combinarAb = true; } if (comprobarDer) { elementoEstudiado = getElemento(d_matriz, fila, columna + 1, d_dimMatriz); if (elementoEstudiado == elemento) combinarDer = true; } if (comprobarIzq) { elementoEstudiado = getElemento(d_matriz, fila, columna - 1, d_dimMatriz); if (elementoEstudiado == elemento) combinarIzq = true; } seguirJugando_aux = combinarArr || combinarAb || combinarIzq || combinarDer; if (seguirJugando_aux) *seguirJugando = seguirJugando_aux; } // -------- FUNCIONES AUX HOST ----------- // __host__ void leerParametros(int argc, const char* argv[]) /* Parsea los parámetros introducidos en la llamada al programa por consola, seteando las variables del juego. */ { if ((argc != 5) || ((argv[1][0] != 'a') && (argv[1][0] != 'm')) || ((argv[2][0] != 'f') && (argv[2][0] != 'd'))) { cout << "Error en la introduccion de parametros, los parametros son:\nautomatico/manual (a/m), facil/dificil (f/d), num_filas, num_columnas\n\nUso = nombreprograma a/m f/d num_filas num_columnas\n" << endl; exit(1); } else { dimMatriz.numFilas = atoi(argv[3]); dimMatriz.numColumnas = atoi(argv[4]); if (dimMatriz.numFilas != dimMatriz.numColumnas) { cout << "El numero de filas y de columnas no puede ser distinto, crack." 
<< endl; exit(2); } bytesMatriz = atoi(argv[3]) * atoi(argv[4]) * sizeof(int); // Se dimensionan los hilos y los grids de bloques if (dimMatriz.numFilas % 2 == 0) { dim3 bloques(2, 2); dim3 hilos(dimMatriz.numFilas / 2, dimMatriz.numColumnas / 2); dimGrid = bloques; dimBlock = hilos; } else { dim3 bloques(1, 1); dim3 hilos(dimMatriz.numFilas, dimMatriz.numColumnas); dimGrid = bloques; dimBlock = hilos; } if (argv[1][0] == 'a') automatico = true; else automatico = false; if (argv[2][0] == 'f') modoDiablo = false; else modoDiablo = true; } } __host__ void pintarMatriz(int *h_matriz) /* Dada una matriz, la dibuja por pantalla. */ { HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE); for (size_t i = 0; i < dimMatriz.numColumnas; i++) { SetConsoleTextAttribute(hConsole, 14); cout << ("+-------"); } cout << "+" << endl; for (int i = 0; i < dimMatriz.numColumnas; i++) { for (int j = 0; j < dimMatriz.numFilas; j++) { // La funcion de print evalua en su interior si deberia poner un /t o no, dependiendo de la longitud del numero printf("[%d%s]", *(h_matriz + i * dimMatriz.numColumnas + j), *(h_matriz + i * dimMatriz.numColumnas + j) % 100000 == *(h_matriz + i * dimMatriz.numColumnas + j) ? "\t" : ""); } printf("\n"); } for (size_t i = 0; i < dimMatriz.numColumnas; i++) { cout << ("+-------"); } cout << "+" << endl; SetConsoleTextAttribute(hConsole, 15); } __host__ void caracteristicasTarjeta() /* Saca por pantalla las caracteristicas de todas las tarjetas graficas del pc */ { // Recojo el número de tarjetas de la gráfica int numTarjetas; cudaGetDeviceCount(&numTarjetas); // Para cada una, imprimo sus características for (int i = 0; i < numTarjetas; i++) { cudaDeviceProp caracteristicas; cudaGetDeviceProperties(&caracteristicas, i); printf("Numero de dispositivo: %d\n", i); printf(" Nombre del dispositivo: %s\n", caracteristicas.name); printf(" Frecuencia del reloj de memoria (KHz): %d\n", caracteristicas.memoryClockRate); printf(" Interfaz de memoria (bits): %d\n", caracteristicas.memoryBusWidth); printf(" Ancho de banda de memoria (GB/s): %f\n", 2.0*caracteristicas.memoryClockRate*(caracteristicas.memoryBusWidth / 8) / 1.0e6); } } // ------- OP. CON MATRIZ EN HOST ------- // __host__ void inicializarMatriz(int *h_matriz) /* Dada una matriz, la rellena con 0s, 2s, 4s u 8s, aleatoriamente y dependiendo del nivel de dificultad elegido. */ { srand(time(NULL)); // Contador de casillas rellenadas. Dependiendo de la dificultad, tiene un tope distinto. 
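// The random position is computed as (rand() % numColumnas) * numColumnas +
// (rand() % numFilas); this mixes the row and column moduli, which is only safe
// because leerParametros rejects non-square boards.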
	int contadorSemillas = 0;
	int *posicionAleatoria;

	if (modoDiablo)
	{
		int array_posibles_numeros[] = { 2,4 };
		while ((contadorSemillas < 8) && (contadorSemillas < dimMatriz.numFilas * dimMatriz.numColumnas)) // Mientras no se hayan lanzado todas las semillas
		{
			posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas
			if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez
			{
				*posicionAleatoria = array_posibles_numeros[rand() % 2]; // Cambio ese cero por un numero aleatorio entre los candidatos (2 o 4)
				contadorSemillas++; // Sumo uno al contador de semillas
			}
		}
	}
	else
	{
		int array_posibles_numeros[] = { 2,4,8 };
		while ((contadorSemillas < 15) && (contadorSemillas < dimMatriz.numFilas * dimMatriz.numColumnas)) // Mientras no se hayan lanzado todas las semillas
		{
			posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas
			if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez
			{
				*posicionAleatoria = array_posibles_numeros[rand() % 3]; // Cambio ese cero por un numero aleatorio entre los candidatos (2, 4 u 8)
				contadorSemillas++; // Sumo uno al contador de semillas
			}
		}
	}
}

__host__ void rellenarMatrizconcero(int *h_matriz)
/*
Dada una matriz, la rellena con 0s.
*/
{
	for (int i = 0; i < dimMatriz.numColumnas; ++i)
	{
		for (int j = 0; j < dimMatriz.numFilas; ++j)
		{
			*(h_matriz + i * dimMatriz.numColumnas + j) = 0;
		}
	}
}

__host__ void copiarMatriz(int *h_matrizCopia, int *h_matrizPega)
/*
Copia matriz de copia en matriz de pega.
*/
{
	// Punteros a matrices en DEVICE:
	int *d_matrizCopia;
	int *d_matrizPega;
	dimensionesMatriz* d_dimMatriz;

	// Reservo memoria en DEVICE:
	cudaMalloc((void **)&d_matrizCopia, bytesMatriz);
	cudaMalloc((void **)&d_matrizPega, bytesMatriz);
	cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));

	// Muevo matrices de HOST a DEVICE:
	cudaMemcpy(d_matrizCopia, h_matrizCopia, bytesMatriz, cudaMemcpyHostToDevice);
	cudaMemcpy(d_matrizPega, h_matrizPega, bytesMatriz, cudaMemcpyHostToDevice);
	cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);

	// Primero, copio salida a entrada.
	kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizCopia, d_matrizPega, d_dimMatriz);
	cudaDeviceSynchronize();

	// Después, pongo a 0 la matriz de copia.
kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizCopia, d_dimMatriz); cudaDeviceSynchronize(); // Devolvemos resultado de DEVICE a HOST: cudaMemcpy(h_matrizPega, d_matrizPega, bytesMatriz, cudaMemcpyDeviceToHost); cudaMemcpy(h_matrizCopia, d_matrizCopia, bytesMatriz, cudaMemcpyDeviceToHost); // Libero memoria de DEVICE: cudaFree(d_matrizPega); cudaFree(d_matrizCopia); cudaFree(d_dimMatriz); } __host__ bool desplazarMatriz(int *h_matrizEntrada, int *h_matrizSalida, int* d_matrizEntrada, int* d_matrizSalida, int* h_puntuacion, int despVertical, int despHorizontal) { int* d_despVertical = 0; int* d_despHorizontal = 0; int* d_puntuacion = 0; dimensionesMatriz* d_dimMatriz; // Reservo memoria en DEVICE: cudaMalloc((void **)&d_despVertical, sizeof(int)); cudaMalloc((void **)&d_despHorizontal, sizeof(int)); cudaMalloc((void **)&d_puntuacion, sizeof(int)); cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); // Muevo matrices de HOST a DEVICE: cudaMemcpy(d_matrizEntrada, h_matrizEntrada, bytesMatriz, cudaMemcpyHostToDevice); cudaMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, cudaMemcpyHostToDevice); cudaMemcpy(d_puntuacion, h_puntuacion, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice); cudaMemcpy(d_despVertical, &despVertical, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_despHorizontal, &despHorizontal, sizeof(int), cudaMemcpyHostToDevice); // Realizo la suma: kernelSuma << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_puntuacion, d_despVertical, d_despHorizontal, d_dimMatriz); // Espero a que termine de operar: cudaDeviceSynchronize(); cudaMemcpy(h_puntuacion, d_puntuacion, sizeof(int), cudaMemcpyDeviceToHost); // Variable que dice si las matrices son iguales o no. bool h_iguales = true; bool *d_iguales; cudaMalloc((void **)&d_iguales, sizeof(bool)); // Mientras la matriz de entrada sea distinta de salida, // significa que puedo seguir desplazando. // Cuando sean iguales, detengo el bucle. do { // Primero, copio salida a entrada. kernelCopiarMatriz << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_dimMatriz); cudaDeviceSynchronize(); // Segundo, seteo salida a 0. kernelSetMatrizCeros << < dimGrid, dimBlock >> > (d_matrizSalida, d_dimMatriz); cudaDeviceSynchronize(); // Desplazo kernelDesplazar << < dimGrid, dimBlock >> > (d_matrizEntrada, d_matrizSalida, d_despVertical, d_despHorizontal, d_dimMatriz); cudaDeviceSynchronize(); // Compruebo si tengo que seguir desplazando. // Doy por hecho que son iguales. Si no lo son, desplazare. h_iguales = true; // Muevo a device. cudaMemcpy(d_iguales, &h_iguales, sizeof(bool), cudaMemcpyHostToDevice); // Veo si son iguales. kernelComprobarIguales << < dimGrid, dimBlock >> > (d_matrizSalida, d_matrizEntrada, d_iguales, d_dimMatriz); cudaDeviceSynchronize(); // Limpio memoria tras trastear con d_iguales. cudaMemcpy(&h_iguales, d_iguales, sizeof(bool), cudaMemcpyDeviceToHost); } while (!h_iguales); cudaFree(d_iguales); // Compruebo si la matriz está llena y si se puede mover en cualq. 
dirección bool h_movimientosPosibles = true; // Devolvemos resultado de DEVICE a HOST: cudaMemcpy(h_matrizSalida, d_matrizSalida, bytesMatriz, cudaMemcpyDeviceToHost); // Si esta llena compruebo si hay movimientos posibles if (estaLlena(d_matrizSalida)) h_movimientosPosibles = movimientosPosibles(d_matrizSalida); // Si no, añado una nueva semilla a la matriz resultante en host else { nuevaSemilla(h_matrizSalida, 1); // Añadimos la nueva semilla // Comprobamos si con la nueva semilla anadida, hemos perdido cudaMemcpy(d_matrizSalida, h_matrizSalida, bytesMatriz, cudaMemcpyHostToDevice); if (estaLlena(d_matrizSalida)) h_movimientosPosibles = movimientosPosibles(d_matrizSalida); } // Libero memoria de DEVICE: cudaFree(d_despVertical); cudaFree(d_despHorizontal); cudaFree(d_dimMatriz); return h_movimientosPosibles; } __host__ void setElementoHost(int *h_matriz, int fila, int columna, int elemento) /* Dada una matriz, escribe el elemento en [fila][columna] */ { h_matriz[fila * dimMatriz.numColumnas + columna] = elemento; } __host__ void nuevaSemilla(int *h_matriz, int numSemillas) /* Crea numSemillas nuevas semillas en la matriz almacenada en device */ { int *posicionAleatoria; bool semillaGenerada = false; if (modoDiablo) { int array_posibles_numeros[] = { 2,4 }; while ((!semillaGenerada) && (numSemillas != 0)) // Mientras no se haya encontrado una posicion con 0 y no se hallan lanzado todas las semillas { posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez { *posicionAleatoria = array_posibles_numeros[rand() % 2]; // Cambio ese cero por un numero aleatorio entre los candidatos (2 o 4) semillaGenerada = true; numSemillas--; } } } else { int array_posibles_numeros[] = { 2,4,8 }; while ((!semillaGenerada) && (numSemillas != 0)) // Mientras no se haya encontrado una posicion con 0 y no se hayan lanzado todas las semillas { posicionAleatoria = h_matriz + (rand() % dimMatriz.numColumnas) * dimMatriz.numColumnas + (rand() % dimMatriz.numFilas); // Calculo una posicion aleatoria donde poner una de las semillas if (*posicionAleatoria == 0) // Si es 0 inicialmente, es decir, no ha sido escogida por segunda vez { *posicionAleatoria = array_posibles_numeros[rand() % 3]; // Cambio ese cero por un numero aleatorio entre los candidatos (2, 4 u 8) semillaGenerada = true; numSemillas--; } } } } // ------- COMPROBACIONES EN HOST ------- // __host__ bool estaLlena(int* d_matriz) { // Compruebo si la matriz esta llena bool h_estaLlena = true; bool *d_estaLlena; dimensionesMatriz* d_dimMatriz; cudaMalloc((void **)&d_estaLlena, sizeof(bool)); cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz)); cudaMemcpy(d_estaLlena, &h_estaLlena, sizeof(bool), cudaMemcpyHostToDevice); cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice); // Veo si está llena. kernelComprobarLlena << < dimGrid, dimBlock >> > (d_matriz, d_estaLlena, d_dimMatriz); cudaDeviceSynchronize(); cudaMemcpy(&h_estaLlena, d_estaLlena, sizeof(bool), cudaMemcpyDeviceToHost); // Limpio memoria tras trastear con d_estaLlena. 
	cudaFree(d_estaLlena);
	cudaFree(d_dimMatriz);

	return h_estaLlena;
}

__host__ bool finJuego(int* d_matriz)
{
	// Compruebo si la matriz contiene algún 16384
	bool h_haGanado = false;
	bool *d_haGanado;
	dimensionesMatriz* d_dimMatriz;

	cudaMalloc((void **)&d_haGanado, sizeof(bool));
	cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));

	cudaMemcpy(d_haGanado, &h_haGanado, sizeof(bool), cudaMemcpyHostToDevice);
	cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);

	// Veo si ha ganado.
	kernelComprobarSiHaGanado << < dimGrid, dimBlock >> > (d_matriz, d_haGanado, d_dimMatriz);
	cudaDeviceSynchronize();

	cudaMemcpy(&h_haGanado, d_haGanado, sizeof(bool), cudaMemcpyDeviceToHost);

	// Limpio memoria tras trastear con d_haGanado.
	cudaFree(d_haGanado);
	cudaFree(d_dimMatriz);

	return h_haGanado;
}

__host__ bool movimientosPosibles(int* d_matriz)
/*
Llama al kernel de comprobacion de movimientos posibles
*/
{
	bool h_movimientosPosibles = false;
	dimensionesMatriz* d_dimMatriz;
	bool *d_movimientosPosibles;

	cudaMalloc((void **)&d_movimientosPosibles, sizeof(bool));
	cudaMalloc((void **)&d_dimMatriz, sizeof(dimensionesMatriz));

	cudaMemcpy(d_movimientosPosibles, &h_movimientosPosibles, sizeof(bool), cudaMemcpyHostToDevice);
	cudaMemcpy(d_dimMatriz, &dimMatriz, sizeof(dimensionesMatriz), cudaMemcpyHostToDevice);

	// Compruebo si hay movimientos que se puedan hacer
	kernelComprobarMovimientosPosibles << < dimGrid, dimBlock >> > (d_matriz, d_movimientosPosibles, d_dimMatriz);
	cudaDeviceSynchronize();

	// Paso el booleano a memoria del host y libero la memoria de device
	cudaMemcpy(&h_movimientosPosibles, d_movimientosPosibles, sizeof(bool), cudaMemcpyDeviceToHost);
	cudaFree(d_dimMatriz);
	cudaFree(d_movimientosPosibles);

	return h_movimientosPosibles;
}

// ----- GUARDADO Y LECTURA ----- //

__host__ void escribirMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos)
{
	FILE *archivo;

	// Preparo nombre:
	nombreJugador += ".txt";
	char * nombreArchivo = new char[nombreJugador.length() + 1];
	strcpy(nombreArchivo, nombreJugador.c_str());

	// Abro archivo:
	archivo = fopen(nombreArchivo, "w");

	if (archivo == NULL)
	{
		cout << "Error escribiendo partida. " << endl;
	}
	else
	{
		fprintf(archivo, "%d\n", dimMatriz.numFilas);
		fprintf(archivo, "%d\n", dimMatriz.numColumnas);
		fprintf(archivo, "%d\n", vidas);
		fprintf(archivo, "%d\n", *movimientos);
		fprintf(archivo, "%d\n", *puntuacion);

		for (int i = 0; i < dimMatriz.numColumnas; ++i)
		{
			for (int j = 0; j < dimMatriz.numFilas; ++j)
			{
				fprintf(archivo, "%d ", *(h_matriz + i * dimMatriz.numColumnas + j));
			}
			fprintf(archivo, "\n");
		}

		// Cierro el archivo solo si se abrio correctamente.
		fclose(archivo);
	}

	delete[] nombreArchivo;
}

__host__ bool leerMatriz(int* h_matriz, string nombreJugador, int* puntuacion, int* movimientos)
{
	// Cargo el archivo
	ifstream in(nombreJugador + ".txt");
	bool lecturaCorrecta = true;

	// Si error
	if (!in)
	{
		cout << "Error abriendo el archivo. La partida no existe, se iniciara una partida nueva." << endl;
		lecturaCorrecta = false;
	}
	// Si no, escribo matriz
	else
	{
		int a_filas, a_columnas;
		in >> a_filas;
		in >> a_columnas;
		in >> vidas;

		if (a_filas != dimMatriz.numFilas || a_columnas != dimMatriz.numColumnas)
		{
			cout << "La partida cargada no es congruente con el numero de filas/columnas pasada como parametro." << endl;
			cout << "Se iniciara una partida nueva."
<< endl; lecturaCorrecta = false; } else { // Cargo movimientos y puntuacion in >> *movimientos; in >> *puntuacion; for (int fila = 0; fila < dimMatriz.numFilas; fila++) { for (int columna = 0; columna < dimMatriz.numColumnas; columna++) { // Parseo el numero int num; in >> num; // Lo escribo en la posicion setElementoHost(h_matriz, fila, columna, num); } } } } // Cierro archivo in.close(); return lecturaCorrecta; }
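/*
 * Illustrative sketch, not part of the original game file above: it isolates the thread-to-cell
 * mapping that the kernels in that file rely on (one thread per board cell, board stored
 * row-major as fila * numColumnas + columna). The names BOARD_DIM and kernelIndiceLineal are
 * hypothetical and exist only for this example.
 */
#include <cstdio>
#include <cuda_runtime.h>

#define BOARD_DIM 4  // assumed small square board, as in the game

__global__ void kernelIndiceLineal(int* d_tablero, int numFilas, int numColumnas)
{
	// Same index arithmetic as the game kernels: y selects the row, x the column.
	int fila    = blockIdx.y * blockDim.y + threadIdx.y;
	int columna = blockIdx.x * blockDim.x + threadIdx.x;
	if (fila < numFilas && columna < numColumnas)
		d_tablero[fila * numColumnas + columna] = fila * numColumnas + columna;
}

int main()
{
	const int n = BOARD_DIM * BOARD_DIM;
	int h_tablero[n];
	int* d_tablero = nullptr;
	cudaMalloc(&d_tablero, n * sizeof(int));

	// One block of BOARD_DIM x BOARD_DIM threads, mirroring the odd-size branch of leerParametros.
	dim3 bloques(1, 1);
	dim3 hilos(BOARD_DIM, BOARD_DIM);
	kernelIndiceLineal<<<bloques, hilos>>>(d_tablero, BOARD_DIM, BOARD_DIM);
	cudaDeviceSynchronize();

	cudaMemcpy(h_tablero, d_tablero, n * sizeof(int), cudaMemcpyDeviceToHost);
	for (int i = 0; i < BOARD_DIM; ++i)
	{
		for (int j = 0; j < BOARD_DIM; ++j)
			printf("%3d ", h_tablero[i * BOARD_DIM + j]);
		printf("\n");
	}
	cudaFree(d_tablero);
	return 0;
}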
175a2d46ad9dc585745fe8d6558b73cbb3210e3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // compute any intermediate results in floating point float result = 0.f; // get positions const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; // handle OOBs if((thread_2D_pos.x < numCols) && (thread_2D_pos.y < numRows)) { // square filter const int filter_offset = filterWidth/2; // sum the window pixels for(int i = -filter_offset; i <= filter_offset; i++) { for(int j = -filter_offset; j <= filter_offset; j++) { // clamp for out of range access int clamped_i = min(max(thread_2D_pos.y + i, 0), static_cast<int>(numRows - 1)); int clamped_j = min(max(thread_2D_pos.x + j, 0), static_cast<int>(numCols - 1)); //clamp(i, 0, numRows-1); clamp(j, 0, numCols-1); //Cg // result += tex2D(tex8u,thread_2D_pos.y + i, thread_2D_pos.x + j); // tex2D option float image_value = static_cast<float>(inputChannel[clamped_i * numCols + clamped_j]); float filter_value = filter[(i + filter_offset) * filterWidth + j + filter_offset]; result += image_value * filter_value; } } // store final result as unsigned char outputChannel[thread_1D_pos] = static_cast<unsigned char>(result); } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { //position const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //OOB if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); //store in output image outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //allocate memory for the filter on the GPU checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory just allocated checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice ) ); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const dim3 blockSize(16, 16); //Compute grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize((numCols + blockSize.x - 1)/blockSize.x, (numRows + blockSize.y - 1)/blockSize.y); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //call convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now recombine results. We take care of launching this kernel for you. 
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
175a2d46ad9dc585745fe8d6558b73cbb3210e3c.cu
#include "reference_calc.cpp" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // compute any intermediate results in floating point float result = 0.f; // get positions const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; // handle OOBs if((thread_2D_pos.x < numCols) && (thread_2D_pos.y < numRows)) { // square filter const int filter_offset = filterWidth/2; // sum the window pixels for(int i = -filter_offset; i <= filter_offset; i++) { for(int j = -filter_offset; j <= filter_offset; j++) { // clamp for out of range access int clamped_i = min(max(thread_2D_pos.y + i, 0), static_cast<int>(numRows - 1)); int clamped_j = min(max(thread_2D_pos.x + j, 0), static_cast<int>(numCols - 1)); //clamp(i, 0, numRows-1); clamp(j, 0, numCols-1); //Cg // result += tex2D(tex8u,thread_2D_pos.y + i, thread_2D_pos.x + j); // tex2D option float image_value = static_cast<float>(inputChannel[clamped_i * numCols + clamped_j]); float filter_value = filter[(i + filter_offset) * filterWidth + j + filter_offset]; result += image_value * filter_value; } } // store final result as unsigned char outputChannel[thread_1D_pos] = static_cast<unsigned char>(result); } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { //position const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //OOB if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); //store in output image outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //allocate memory for the filter on the GPU checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //Copy the filter on the host (h_filter) to the memory just allocated checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice ) ); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const dim3 blockSize(16, 16); //Compute grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize((numCols + blockSize.x - 1)/blockSize.x, (numRows + blockSize.y - 1)/blockSize.y); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //call convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now recombine results. We take care of launching this kernel for you. 
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
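/*
 * Illustrative sketch, not part of the original assignment code above: a host-only check of the
 * ceiling-division idiom used to size the grid, (numCols + blockSize.x - 1) / blockSize.x, which
 * guarantees enough 16x16 blocks to cover images whose dimensions are not multiples of 16.
 * blocks_needed is a hypothetical helper written only for this example.
 */
#include <cassert>
#include <cstddef>

static size_t blocks_needed(size_t elems, size_t block) { return (elems + block - 1) / block; }

int main()
{
	assert(blocks_needed(512, 16) == 32);  // exact multiple: no spare block
	assert(blocks_needed(513, 16) == 33);  // one element over: one extra, partially full block
	assert(blocks_needed(1, 16) == 1);     // a tiny image still gets one block
	return 0;
}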
996e4e5f45d9292fdcb9f8467b949c766549f3c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <hipcub/hipcub.hpp> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { struct compressed_stream_s { CompressedStreamInfo info; gpu_inflate_input_s ctl; }; // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData( CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; uint8_t* uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t max_uncompressed_block_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + BLOCK_HEADER_SIZE < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size; gpu_inflate_input_s* init_ctl = nullptr; block_len >>= 1; cur += BLOCK_HEADER_SIZE; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; max_uncompressed_block_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_ctl = s->info.copyctl; init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &init_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_ctl = s->info.decctl; init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? 
&init_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_ctl) { s->ctl.srcDevice = const_cast<uint8_t*>(cur); s->ctl.srcSize = block_len; s->ctl.dstDevice = uncompressed + max_uncompressed_size; s->ctl.dstSize = uncompressed_size; } __syncwarp(); if (init_ctl && lane_id == 0) *init_ctl = s->ctl; cur += block_len; max_uncompressed_size += uncompressed_size; max_uncompressed_block_size = max(max_uncompressed_block_size, uncompressed_size); } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; s->info.max_uncompressed_block_size = max_uncompressed_block_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; const gpu_inflate_input_s* dec_in = s->info.decctl; const gpu_inflate_status_s* dec_out = s->info.decstatus; uint8_t* uncompressed_actual = s->info.uncompressed_data; uint8_t* uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + BLOCK_HEADER_SIZE < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += BLOCK_HEADER_SIZE; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) { // Decompression failed, not much point in doing anything else break; } uncompressed_size_est = shuffle((lane_id == 0) ? *(const uint32_t*)&dec_in[num_compressed_blocks].dstSize : 0); uncompressed_size_actual = shuffle( (lane_id == 0) ? 
*(const uint32_t*)&dec_out[num_compressed_blocks].bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s, const uint8_t* start, const uint8_t* end) { constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8; const uint8_t* cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? *cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == PB_TYPE_FIXED64) cur += 8; else if (v == PB_TYPE_FIXED32) cur += 4; else if (v == PB_TYPE_VARINT) state = SKIP_VARINT; else if (v == PB_TYPE_FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? 
CI_DATA2 : CI_PRESENT; idx_id++; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups) { const uint8_t* index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? 
s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { const uint8_t* start = s->strm_info[ci_id].compressed_data; const uint8_t* cur = start; const uint8_t* end = cur + s->strm_info[ci_id].compressed_data_size; gpu_inflate_status_s* decstatus = s->strm_info[ci_id].decstatus; uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len, is_uncompressed; if (cur + BLOCK_HEADER_SIZE > end || cur + BLOCK_HEADER_SIZE >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += BLOCK_HEADER_SIZE; is_uncompressed = block_len & 1; block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += decstatus->bytes_written; decstatus++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] rowidx_stride Row index stride * @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed * value */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s* const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != NULL); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { auto const num_rows = (use_base_stride) ? rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows; auto const start_row = (use_base_stride) ? 
rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows; row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row; } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } template <int block_size> __global__ void __launch_bounds__(block_size) gpu_reduce_pushdown_masks(device_span<orc_column_device_view const> orc_columns, device_2dspan<rowgroup_rows const> rowgroup_bounds, device_2dspan<size_type> set_counts) { typedef hipcub::BlockReduce<size_type, block_size> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; auto const column_id = blockIdx.x; auto const rowgroup_id = blockIdx.y; auto const column = orc_columns[column_id]; auto const t = threadIdx.x; auto const use_child_rg = column.type().id() == type_id::LIST; auto const rg = rowgroup_bounds[rowgroup_id][column_id + (use_child_rg ? 1 : 0)]; if (column.pushdown_mask == nullptr) { // All elements are valid if the null mask is not present if (t == 0) { set_counts[rowgroup_id][column_id] = rg.size(); } return; }; size_type count = 0; static constexpr size_type bits_per_word = sizeof(bitmask_type) * 8; for (auto row = t * bits_per_word + rg.begin; row < rg.end; row += block_size * bits_per_word) { auto const begin_bit = row; auto const end_bit = min(static_cast<size_type>(row + bits_per_word), rg.end); auto const mask_len = end_bit - begin_bit; auto const mask_word = cudf::detail::get_mask_offset_word(column.pushdown_mask, 0, row, end_bit) & ((1 << mask_len) - 1); count += __popc(mask_word); } count = BlockReduce(temp_storage).Sum(count); if (t == 0) { set_counts[rowgroup_id][column_id] = count; } } void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuParseCompressedStripeData), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams, compression_block_size, log2maxcr); } void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuPostDecompressionReassemble), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams); } void __host__ ParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block hipLaunchKernelGGL(( gpuParseRowGroupIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride, use_base_stride); } void __host__ reduce_pushdown_masks(device_span<orc_column_device_view const> columns, device_2dspan<rowgroup_rows 
const> rowgroups, device_2dspan<cudf::size_type> valid_counts, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(columns.size(), rowgroups.size().first); // 1 rowgroup per block hipLaunchKernelGGL(( gpu_reduce_pushdown_masks<128>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), columns, rowgroups, valid_counts); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
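/*
 * Illustrative sketch, not part of cuDF: a host-side worked example of the 3-byte compression
 * block header that gpuParseCompressedStripeData and gpuPostDecompressionReassemble decode above.
 * The value is little-endian; bit 0 marks an uncompressed ("original") block and the remaining
 * bits hold the block length, so the header stores (length << 1) | is_uncompressed.
 */
#include <cassert>
#include <cstdint>

int main()
{
	// Header bytes for a compressed block of length 0x1234: 0x1234 << 1 == 0x2468, little-endian.
	const uint8_t hdr[3] = {0x68, 0x24, 0x00};
	uint32_t block_len       = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16);
	uint32_t is_uncompressed = block_len & 1;
	block_len >>= 1;
	assert(is_uncompressed == 0);
	assert(block_len == 0x1234);
	return 0;
}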
996e4e5f45d9292fdcb9f8467b949c766549f3c4.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <cub/cub.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { struct compressed_stream_s { CompressedStreamInfo info; gpu_inflate_input_s ctl; }; // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData( CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; uint8_t* uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t max_uncompressed_block_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + BLOCK_HEADER_SIZE < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size; gpu_inflate_input_s* init_ctl = nullptr; block_len >>= 1; cur += BLOCK_HEADER_SIZE; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; max_uncompressed_block_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_ctl = s->info.copyctl; init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &init_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_ctl = s->info.decctl; init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? 
&init_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_ctl) { s->ctl.srcDevice = const_cast<uint8_t*>(cur); s->ctl.srcSize = block_len; s->ctl.dstDevice = uncompressed + max_uncompressed_size; s->ctl.dstSize = uncompressed_size; } __syncwarp(); if (init_ctl && lane_id == 0) *init_ctl = s->ctl; cur += block_len; max_uncompressed_size += uncompressed_size; max_uncompressed_block_size = max(max_uncompressed_block_size, uncompressed_size); } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; s->info.max_uncompressed_block_size = max_uncompressed_block_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks const uint8_t* cur = s->info.compressed_data; const uint8_t* end = cur + s->info.compressed_data_size; const gpu_inflate_input_s* dec_in = s->info.decctl; const gpu_inflate_status_s* dec_out = s->info.decstatus; uint8_t* uncompressed_actual = s->info.uncompressed_data; uint8_t* uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + BLOCK_HEADER_SIZE < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); uint32_t is_uncompressed = block_len & 1; uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += BLOCK_HEADER_SIZE; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) { // Decompression failed, not much point in doing anything else break; } uncompressed_size_est = shuffle((lane_id == 0) ? *(const uint32_t*)&dec_in[num_compressed_blocks].dstSize : 0); uncompressed_size_actual = shuffle( (lane_id == 0) ? 
*(const uint32_t*)&dec_out[num_compressed_blocks].bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s, const uint8_t* start, const uint8_t* end) { constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8; const uint8_t* cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT, pos_end = 0; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? *cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == PB_TYPE_FIXED64) cur += 8; else if (v == PB_TYPE_FIXED32) cur += 4; else if (v == PB_TYPE_VARINT) state = SKIP_VARINT; else if (v == PB_TYPE_FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA : (idx_id == ((strm_idx_id >> 8) & 0xff)) ? 
CI_DATA2 : CI_PRESENT; idx_id++; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups) { const uint8_t* index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? 
s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { const uint8_t* start = s->strm_info[ci_id].compressed_data; const uint8_t* cur = start; const uint8_t* end = cur + s->strm_info[ci_id].compressed_data_size; gpu_inflate_status_s* decstatus = s->strm_info[ci_id].decstatus; uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len, is_uncompressed; if (cur + BLOCK_HEADER_SIZE > end || cur + BLOCK_HEADER_SIZE >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += BLOCK_HEADER_SIZE; is_uncompressed = block_len & 1; block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += decstatus->bytes_written; decstatus++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] rowidx_stride Row index stride * @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed * value */ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s* const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != NULL); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { auto const num_rows = (use_base_stride) ? rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows; auto const start_row = (use_base_stride) ? 
rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows; row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row; } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } template <int block_size> __global__ void __launch_bounds__(block_size) gpu_reduce_pushdown_masks(device_span<orc_column_device_view const> orc_columns, device_2dspan<rowgroup_rows const> rowgroup_bounds, device_2dspan<size_type> set_counts) { typedef cub::BlockReduce<size_type, block_size> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; auto const column_id = blockIdx.x; auto const rowgroup_id = blockIdx.y; auto const column = orc_columns[column_id]; auto const t = threadIdx.x; auto const use_child_rg = column.type().id() == type_id::LIST; auto const rg = rowgroup_bounds[rowgroup_id][column_id + (use_child_rg ? 1 : 0)]; if (column.pushdown_mask == nullptr) { // All elements are valid if the null mask is not present if (t == 0) { set_counts[rowgroup_id][column_id] = rg.size(); } return; }; size_type count = 0; static constexpr size_type bits_per_word = sizeof(bitmask_type) * 8; for (auto row = t * bits_per_word + rg.begin; row < rg.end; row += block_size * bits_per_word) { auto const begin_bit = row; auto const end_bit = min(static_cast<size_type>(row + bits_per_word), rg.end); auto const mask_len = end_bit - begin_bit; auto const mask_word = cudf::detail::get_mask_offset_word(column.pushdown_mask, 0, row, end_bit) & ((1 << mask_len) - 1); count += __popc(mask_word); } count = BlockReduce(temp_storage).Sum(count); if (t == 0) { set_counts[rowgroup_id][column_id] = count; } } void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block gpuParseCompressedStripeData<<<dim_grid, dim_block, 0, stream.value()>>>( strm_info, num_streams, compression_block_size, log2maxcr); } void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block gpuPostDecompressionReassemble<<<dim_grid, dim_block, 0, stream.value()>>>(strm_info, num_streams); } void __host__ ParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block gpuParseRowGroupIndex<<<dim_grid, dim_block, 0, stream.value()>>>(row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride, use_base_stride); } void __host__ reduce_pushdown_masks(device_span<orc_column_device_view const> columns, device_2dspan<rowgroup_rows const> rowgroups, device_2dspan<cudf::size_type> valid_counts, rmm::cuda_stream_view stream) { 
dim3 dim_block(128, 1); dim3 dim_grid(columns.size(), rowgroups.size().first); // 1 rowgroup per block gpu_reduce_pushdown_masks<128> <<<dim_grid, dim_block, 0, stream.value()>>>(columns, rowgroups, valid_counts); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
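// Illustrative sketch (not part of the cudf sources above; kernel and variable names
// are invented for illustration). It isolates the cub::BlockReduce pattern used by
// gpu_reduce_pushdown_masks: each thread pops bits from a slice of a bitmask, the
// block reduces the partial counts in shared memory, and thread 0 writes the total.
#include <cub/cub.cuh>
#include <cstdint>

template <int block_size>
__global__ void count_set_bits(uint32_t const* words, int num_words, int* block_counts)
{
  typedef cub::BlockReduce<int, block_size> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  int count = 0;
  // Grid-stride loop over the bitmask words (launch with block_size threads per block)
  for (int i = blockIdx.x * block_size + threadIdx.x; i < num_words;
       i += gridDim.x * block_size) {
    count += __popc(words[i]);
  }
  count = BlockReduce(temp_storage).Sum(count);
  if (threadIdx.x == 0) { block_counts[blockIdx.x] = count; }
}
// Example launch: count_set_bits<128><<<num_blocks, 128, 0, stream>>>(d_words, n, d_counts);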
c7c54ab6c129f8b04acb365ed959cb6fe03f4afa.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables // Functions void CleanupResources(void); void RandomInit_int(unsigned*, int); void RandomInit_fp(float*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(unsigned *A, unsigned *B, int N, int iterations) { int id = blockDim.x * blockIdx.x + threadIdx.x; unsigned sum=0; if(id < N){ for(unsigned i=0; i<iterations; ++i){ A[id] = A[id] * B[id] * id; for(unsigned j=0; j<iterations/4; ++j){ sum *= A[id]; } A[id] = sum*A[id]*B[id]; } } } __global__ void PowerKernalEmpty(unsigned* C, int iterations) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) { //Value1=(I1)+k; //Value2=(I2)+k; //Value3=(Value2)+k; //Value2=(Value1)+k; /* __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); */ } C[id]=id; __syncthreads(); } // Host code unsigned *h_A1, *h_A2, *h_A3; unsigned *d_A1, *d_A2, *d_A3; int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2; // Allocate input vectors h_A and h_B in host memory size_t size1 = N * sizeof(unsigned); h_A1 = (unsigned*)malloc(size1); if (h_A1 == 0) CleanupResources(); h_A2 = (unsigned*)malloc(size1); if (h_A2 == 0) CleanupResources(); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); // Initialize input vectors RandomInit_int(h_A1, N); RandomInit_int(h_A2, N); // Allocate vectors in device memory checkCudaErrors( 
hipMalloc((void**)&d_A1, size1) ); checkCudaErrors( hipMalloc((void**)&d_A2, size1) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N, iterations); checkCudaErrors(hipEventRecord(stop)); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, iterations); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A1) hipFree(d_A1); if (d_A2) hipFree(d_A2); if (d_A3) hipFree(d_A3); // Free host memory if (h_A1) free(h_A1); if (h_A2) free(h_A2); if (h_A3) free(h_A3); } // Allocates an array with random float entries. void RandomInit_int(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } } void RandomInit_fp(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
c7c54ab6c129f8b04acb365ed959cb6fe03f4afa.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables // Functions void CleanupResources(void); void RandomInit_int(unsigned*, int); void RandomInit_fp(float*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(unsigned *A, unsigned *B, int N, int iterations) { int id = blockDim.x * blockIdx.x + threadIdx.x; unsigned sum=0; if(id < N){ for(unsigned i=0; i<iterations; ++i){ A[id] = A[id] * B[id] * id; for(unsigned j=0; j<iterations/4; ++j){ sum *= A[id]; } A[id] = sum*A[id]*B[id]; } } } __global__ void PowerKernalEmpty(unsigned* C, int iterations) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) { //Value1=(I1)+k; //Value2=(I2)+k; //Value3=(Value2)+k; //Value2=(Value1)+k; /* __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); */ } C[id]=id; __syncthreads(); } // Host code unsigned *h_A1, *h_A2, *h_A3; unsigned *d_A1, *d_A2, *d_A3; int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2; // Allocate input vectors h_A and h_B in host memory size_t size1 = N * sizeof(unsigned); h_A1 = (unsigned*)malloc(size1); if (h_A1 == 0) CleanupResources(); h_A2 = (unsigned*)malloc(size1); if (h_A2 == 0) CleanupResources(); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); // Initialize input vectors RandomInit_int(h_A1, N); RandomInit_int(h_A2, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A1, size1) ); checkCudaErrors( 
cudaMalloc((void**)&d_A2, size1) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start)); PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N, iterations); checkCudaErrors(cudaEventRecord(stop)); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, iterations); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A1) cudaFree(d_A1); if (d_A2) cudaFree(d_A2); if (d_A3) cudaFree(d_A3); // Free host memory if (h_A1) free(h_A1); if (h_A2) free(h_A2); if (h_A3) free(h_A3); } // Allocates an array with random float entries. void RandomInit_int(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } } void RandomInit_fp(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
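// Illustrative sketch (standalone, not taken from either file above; dummyKernel is a
// hypothetical placeholder). It shows the event-based timing pattern the benchmark
// relies on, written with cudaDeviceSynchronize() instead of the deprecated
// cudaThreadSynchronize() that the .cu version still calls; the .hip version above
// shows that hipify maps the latter to hipDeviceSynchronize().
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel() {}

int main()
{
    cudaEvent_t start, stop;
    float ms = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    dummyKernel<<<1, 1>>>();
    cudaEventRecord(stop);

    cudaEventSynchronize(stop);              // wait until the stop event has completed
    cudaEventElapsedTime(&ms, start, stop);  // elapsed time is reported in milliseconds
    printf("gpu execution time = %.2f s\n", ms / 1000.0f);

    cudaDeviceSynchronize();                 // preferred replacement for cudaThreadSynchronize()
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}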
f89941881321f7d049578c9183442dcb26b5c341.hip
// !!! This is a file automatically generated by hipify!!! // Solve ATP Transport on Tissue Volume Using GPGPU Acceleration #include <iostream> #include <string> #include <hip/hip_runtime.h> #include "PDETools.h" #include "PDEsolve.h" #include "configuration.h" using namespace std; int main(int argc,char** argv) { // Boundary Condition (user input) float C0 = 1e-7; // <M>, // Output Filename (user input) string dir = "out/ATP/"; string filename = "test"; // Initialize Physical Constants (user input) float D = 5.035e-6f; // diffusivity <cm^2/s> float q = 1.306e-1f; // blood vessel permeability <1/s> float Cb = 3.48e-7f; // blood concentration <M> float Vmax = 4.37e-8f; // maximal consumption <M/s> float Km = 1.43e-4f; // Michaelis-Menton Constant <M> float L = 0.2f; // tissue length <cm> float H = 0.2f; // tissue height <cm> float W = 0.06f; // tissue depth <cm> float l = 0.06f; // window length <cm> float h = 0.03f; // window width <cm> // Simulation Time (user input) float sim_time = 181.0f; // simulation time <s> if (argc == 2) sim_time = atof(argv[1]); float print_frequency = 1.0f; // print frequency <s> // Write-Out Schedule // 0-10s: 1s, 10-30s: 5s, 30-180s: 30s print_scheduler print_time(print_frequency); print_time.schedule(10.0f,5.0f); // (start_time <s>,frequency <s>) print_time.schedule(30.0f,30.0f); // Initialize Computational Domain (user input) int Nx = 144; int Ny = 144; int Nz = 48; float dt = 1e-6; // Calculate Dimensionless Parameters float tau = L*L/D; float alpha = q*tau; float ub = Cb/C0; float beta = Vmax/C0*tau; float km = Km/C0; float ay = H/L; float az = W/L; // Calculate Steady-State Solution float u0 = 0.5*sqrt((km-ub+beta/alpha)*(km-ub+beta/alpha)+4*ub*km)-0.5*(km-ub+beta/alpha); // Calculate Computational Parameters model mdl(alpha,beta,ub,km); grid grd(Nx,Ny,Nz,dt,ay,az); geometry geo(L,W,H,l,h); int N = Nx*Ny*Nz; size_t size = N*sizeof(float); float dx = 1.0f/(Nx-1.0f); float dy = ay/(Ny-1.0f); float dz = az/(Nz-1.0f); float T = sim_time/tau; // Print Parameters float ds = min(min(dx,dy),dz); cout << "\n\n---ATP---\n\n"; cout << "\n\nSimulation Parameters\n\n"; cout << "Runtime: " << sim_time << " s \t\t[" << T/dt << " time steps]\n\n"; cout << "dt/dx^2 = " << dt/ds/ds << endl << endl; cout << "tau = " << tau << " s\n"; cout << "C0 = " << C0 << " M\n\n"; cout << "u0 = " << u0 << endl; cout << "alpha = " << alpha << endl; cout << "ub = " << ub << endl; cout << "beta = " << beta << endl; cout << "km = " << km << endl << endl; // Allocate Memory on Host float* u_h = new float[N](); constIC(u_h,u0,Nx,Ny,Nz); print(u_h,N,dir+filename+"0.csv"); // Allocate Memory on Device float *uold_d,*unew_d; hipMalloc((void**) &uold_d,size); hipMalloc((void**) &unew_d,size); // Copy Memory to Device hipMemcpy(uold_d,u_h,size,hipMemcpyHostToDevice); // Setup Configuration for GPU dim3 dimGrid(GRID_SIZE_X,GRID_SIZE_Y,GRID_SIZE_Z); dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y,BLOCK_SIZE_Z); // Time Iteration float t = 0.0f; int np = 1; time_writer write_time(dir+"t.csv"); write_time(t*tau); float uwin = 1.0f; // Boundary Condition for (int nt = 1; t < T; nt++) { // Call GPU Kernel hipLaunchKernelGGL(( step), dim3(dimGrid),dim3(dimBlock), 0, 0, uold_d,unew_d,uwin,mdl,grd,geo); t += dt; // Print Solution if (print_time(t*tau)) { cout << "Writing t = " << t*tau << " s...\n"; hipMemcpy(u_h,unew_d,size,hipMemcpyDeviceToHost); print(u_h,N,dir+filename+to_string(np)+".csv"); write_time(t*tau); np++; } } // Delete Pointers delete[] u_h; hipFree(uold_d); hipFree(unew_d); return 0; }
f89941881321f7d049578c9183442dcb26b5c341.cu
// Solve ATP Transport on Tissue Volume Using GPGPU Acceleration #include <iostream> #include <string> #include <cuda.h> #include "PDETools.h" #include "PDEsolve.h" #include "configuration.h" using namespace std; int main(int argc,char** argv) { // Boundary Condition (user input) float C0 = 1e-7; // <M>, // Output Filename (user input) string dir = "out/ATP/"; string filename = "test"; // Initialize Physical Constants (user input) float D = 5.035e-6f; // diffusivity <cm^2/s> float q = 1.306e-1f; // blood vessel permeability <1/s> float Cb = 3.48e-7f; // blood concentration <M> float Vmax = 4.37e-8f; // maximal consumption <M/s> float Km = 1.43e-4f; // Michaelis-Menton Constant <M> float L = 0.2f; // tissue length <cm> float H = 0.2f; // tissue height <cm> float W = 0.06f; // tissue depth <cm> float l = 0.06f; // window length <cm> float h = 0.03f; // window width <cm> // Simulation Time (user input) float sim_time = 181.0f; // simulation time <s> if (argc == 2) sim_time = atof(argv[1]); float print_frequency = 1.0f; // print frequency <s> // Write-Out Schedule // 0-10s: 1s, 10-30s: 5s, 30-180s: 30s print_scheduler print_time(print_frequency); print_time.schedule(10.0f,5.0f); // (start_time <s>,frequency <s>) print_time.schedule(30.0f,30.0f); // Initialize Computational Domain (user input) int Nx = 144; int Ny = 144; int Nz = 48; float dt = 1e-6; // Calculate Dimensionless Parameters float tau = L*L/D; float alpha = q*tau; float ub = Cb/C0; float beta = Vmax/C0*tau; float km = Km/C0; float ay = H/L; float az = W/L; // Calculate Steady-State Solution float u0 = 0.5*sqrt((km-ub+beta/alpha)*(km-ub+beta/alpha)+4*ub*km)-0.5*(km-ub+beta/alpha); // Calculate Computational Parameters model mdl(alpha,beta,ub,km); grid grd(Nx,Ny,Nz,dt,ay,az); geometry geo(L,W,H,l,h); int N = Nx*Ny*Nz; size_t size = N*sizeof(float); float dx = 1.0f/(Nx-1.0f); float dy = ay/(Ny-1.0f); float dz = az/(Nz-1.0f); float T = sim_time/tau; // Print Parameters float ds = min(min(dx,dy),dz); cout << "\n\n---ATP---\n\n"; cout << "\n\nSimulation Parameters\n\n"; cout << "Runtime: " << sim_time << " s \t\t[" << T/dt << " time steps]\n\n"; cout << "dt/dx^2 = " << dt/ds/ds << endl << endl; cout << "tau = " << tau << " s\n"; cout << "C0 = " << C0 << " M\n\n"; cout << "u0 = " << u0 << endl; cout << "alpha = " << alpha << endl; cout << "ub = " << ub << endl; cout << "beta = " << beta << endl; cout << "km = " << km << endl << endl; // Allocate Memory on Host float* u_h = new float[N](); constIC(u_h,u0,Nx,Ny,Nz); print(u_h,N,dir+filename+"0.csv"); // Allocate Memory on Device float *uold_d,*unew_d; cudaMalloc((void**) &uold_d,size); cudaMalloc((void**) &unew_d,size); // Copy Memory to Device cudaMemcpy(uold_d,u_h,size,cudaMemcpyHostToDevice); // Setup Configuration for GPU dim3 dimGrid(GRID_SIZE_X,GRID_SIZE_Y,GRID_SIZE_Z); dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y,BLOCK_SIZE_Z); // Time Iteration float t = 0.0f; int np = 1; time_writer write_time(dir+"t.csv"); write_time(t*tau); float uwin = 1.0f; // Boundary Condition for (int nt = 1; t < T; nt++) { // Call GPU Kernel step<<<dimGrid,dimBlock>>>(uold_d,unew_d,uwin,mdl,grd,geo); t += dt; // Print Solution if (print_time(t*tau)) { cout << "Writing t = " << t*tau << " s...\n"; cudaMemcpy(u_h,unew_d,size,cudaMemcpyDeviceToHost); print(u_h,N,dir+filename+to_string(np)+".csv"); write_time(t*tau); np++; } } // Delete Pointers delete[] u_h; cudaFree(uold_d); cudaFree(unew_d); return 0; }
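// Illustrative sketch (assumption: step() reads the first buffer and writes the second;
// PDEsolve.h is not shown above, so the real kernel may rotate or copy buffers in some
// other way). A typical explicit time-stepping loop ping-pongs the two device arrays by
// swapping the host-side pointers after each launch; step_stub is a hypothetical stand-in.
#include <utility>
#include <cuda_runtime.h>

__global__ void step_stub(const float* uold, float* unew, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) unew[i] = 0.5f * uold[i];  // placeholder update rule
}

int main()
{
  const int n = 1 << 10;
  float *uold_d, *unew_d;
  cudaMalloc(&uold_d, n * sizeof(float));
  cudaMalloc(&unew_d, n * sizeof(float));
  cudaMemset(uold_d, 0, n * sizeof(float));

  for (int nt = 0; nt < 100; ++nt) {
    step_stub<<<(n + 255) / 256, 256>>>(uold_d, unew_d, n);
    std::swap(uold_d, unew_d);  // next step reads the buffer that was just written
  }

  cudaDeviceSynchronize();
  cudaFree(uold_d);
  cudaFree(unew_d);
  return 0;
}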
ef1391b367cdd3ee642cc87ebe1fd220c1ab343d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sstream> #include <iomanip> #include <time.h> #include <opencv2/opencv.hpp> using namespace cv; using namespace std; const int MAX_ROW = 2200, MAX_COL = 4100; const int NUMBER_OF_BLOCKS = 4; int kernelSize, numberOfThreads; Mat image, newImage; int h_in[ 3 * MAX_ROW * MAX_COL ]; int h_out[ 3 * MAX_ROW * MAX_COL ]; int size; __global__ void blur( int * d_in, int * d_out, int rowsPerThread, int totalRow, int totalCol, int kernelSize ){ int fr = rowsPerThread * (blockDim.x * blockIdx.x + threadIdx.x); int to = fr + rowsPerThread; int newColor[3]; for( int row = fr; row < to && row < totalRow; ++row ){ for( int col = 0; col < totalCol; ++col ){ //Calculate the value of the pixel [row][col] newColor[0] = newColor[1] = newColor[2] = 0; for( int i = row - kernelSize/2; i <= row + kernelSize/2; ++i ){ for( int j = col - kernelSize/2; j <= col + kernelSize/2; ++j ){ for( int k = 0; k < 3; ++k ) { newColor[k] += d_in[ (totalCol*((i+totalRow)%totalRow) + ((j+totalCol)%totalCol))*3+k ]; } } } //Store it as the variable of a pixel for( int k = 0; k < 3; ++k ) { d_out[ (totalCol*row + col)*3+k ] = newColor[k] / (kernelSize*kernelSize); } } } } void storeImageData(){ Vec3b currentColor; for( int j = 0; j < image.rows; ++j ){ for( int i = 0; i < image.cols; ++i ){ currentColor = image.at<Vec3b>(Point( i, j )); for( int k = 0; k < 3; ++k ){ h_in[ (image.cols*j + i)*3+k ] = currentColor[ k ]; } } } } void saveNewImageData(){ newImage = Mat(image.rows, image.cols, CV_8UC3); for( int j = 0; j < image.rows; ++j ){ for( int i = 0; i < image.cols; ++i ){ Vec3b currentPixel; for( int k = 0; k < 3; ++k ){ currentPixel[ k ] = h_out[ (image.cols*j + i)*3+k ]; } newImage.at<Vec3b>(Point( i, j )) = currentPixel; } } } void displayImage( Mat &image ){ namedWindow("Display Image", WINDOW_AUTOSIZE ); imshow("Display Image", image); waitKey(0); } int main(int argc, char** argv ) { hipSetDevice(0); //start time struct timespec start, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &start); if ( argc != 4 ) { printf("usage: ./script.sh <Image_Path> <Kernel_Size> <Number_Threads>\n"); return -1; } //Read original image using path image = imread( argv[1], 1 ); if ( !image.data ) { printf("No image data \n"); return -1; } //Read kernel size stringstream ss1( argv[ 2 ] ); ss1 >> kernelSize; if( !(kernelSize&1) || kernelSize < 1 ){ printf( "Kernel size must be an odd positive integer.\n" ); return -1; } //Read number of threads stringstream ss2( argv[ 3 ] ); ss2 >> numberOfThreads; if( (numberOfThreads < 1) || (numberOfThreads&1) ){ printf( "Number of threads must be an even positive integer.\n" ); return -1; } //cout << "File\t\tKernel\t\tThreads\t\tTime(s)\n"; printf( "%s\t\t", argv[ 1 ] ); printf( "%d\t\t\t\t", kernelSize ); printf( "%d\t\t\t\t", numberOfThreads ); size = sizeof( int ) * 3 * MAX_COL * MAX_ROW; //Declaring pointers int * d_in, * d_out; //Alloc memory hipMalloc( (void **) &d_in, size ); hipMalloc( (void **) &d_out, size ); //Initialize variables storeImageData(); //Copy host to device hipMemcpy( d_in, &h_in, size, hipMemcpyHostToDevice ); //Launch kernel //blur(); hipLaunchKernelGGL(( blur), dim3(NUMBER_OF_BLOCKS), dim3(numberOfThreads/NUMBER_OF_BLOCKS) , 0, 0, d_in, d_out, (image.rows + numberOfThreads - 1)/numberOfThreads, image.rows, image.cols, kernelSize ); //Copy device to host hipMemcpy( &h_out, d_out, size, hipMemcpyDeviceToHost ); //Free memory hipFree( d_in ); hipFree( d_out ); 
//Create newImage with the matrix out saveNewImageData(); //calculate and print elapsed time clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - start.tv_sec); elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0; printf( "%.4f\n", elapsed ); //Display blurred image //displayImage( newImage ); return 0; }
ef1391b367cdd3ee642cc87ebe1fd220c1ab343d.cu
#include <stdio.h> #include <sstream> #include <iomanip> #include <time.h> #include <opencv2/opencv.hpp> using namespace cv; using namespace std; const int MAX_ROW = 2200, MAX_COL = 4100; const int NUMBER_OF_BLOCKS = 4; int kernelSize, numberOfThreads; Mat image, newImage; int h_in[ 3 * MAX_ROW * MAX_COL ]; int h_out[ 3 * MAX_ROW * MAX_COL ]; int size; __global__ void blur( int * d_in, int * d_out, int rowsPerThread, int totalRow, int totalCol, int kernelSize ){ int fr = rowsPerThread * (blockDim.x * blockIdx.x + threadIdx.x); int to = fr + rowsPerThread; int newColor[3]; for( int row = fr; row < to && row < totalRow; ++row ){ for( int col = 0; col < totalCol; ++col ){ //Calculate the value of the pixel [row][col] newColor[0] = newColor[1] = newColor[2] = 0; for( int i = row - kernelSize/2; i <= row + kernelSize/2; ++i ){ for( int j = col - kernelSize/2; j <= col + kernelSize/2; ++j ){ for( int k = 0; k < 3; ++k ) { newColor[k] += d_in[ (totalCol*((i+totalRow)%totalRow) + ((j+totalCol)%totalCol))*3+k ]; } } } //Store it as the variable of a pixel for( int k = 0; k < 3; ++k ) { d_out[ (totalCol*row + col)*3+k ] = newColor[k] / (kernelSize*kernelSize); } } } } void storeImageData(){ Vec3b currentColor; for( int j = 0; j < image.rows; ++j ){ for( int i = 0; i < image.cols; ++i ){ currentColor = image.at<Vec3b>(Point( i, j )); for( int k = 0; k < 3; ++k ){ h_in[ (image.cols*j + i)*3+k ] = currentColor[ k ]; } } } } void saveNewImageData(){ newImage = Mat(image.rows, image.cols, CV_8UC3); for( int j = 0; j < image.rows; ++j ){ for( int i = 0; i < image.cols; ++i ){ Vec3b currentPixel; for( int k = 0; k < 3; ++k ){ currentPixel[ k ] = h_out[ (image.cols*j + i)*3+k ]; } newImage.at<Vec3b>(Point( i, j )) = currentPixel; } } } void displayImage( Mat &image ){ namedWindow("Display Image", WINDOW_AUTOSIZE ); imshow("Display Image", image); waitKey(0); } int main(int argc, char** argv ) { cudaSetDevice(0); //start time struct timespec start, finish; double elapsed; clock_gettime(CLOCK_MONOTONIC, &start); if ( argc != 4 ) { printf("usage: ./script.sh <Image_Path> <Kernel_Size> <Number_Threads>\n"); return -1; } //Read original image using path image = imread( argv[1], 1 ); if ( !image.data ) { printf("No image data \n"); return -1; } //Read kernel size stringstream ss1( argv[ 2 ] ); ss1 >> kernelSize; if( !(kernelSize&1) || kernelSize < 1 ){ printf( "Kernel size must be an odd positive integer.\n" ); return -1; } //Read number of threads stringstream ss2( argv[ 3 ] ); ss2 >> numberOfThreads; if( (numberOfThreads < 1) || (numberOfThreads&1) ){ printf( "Number of threads must be an even positive integer.\n" ); return -1; } //cout << "File\t\tKernel\t\tThreads\t\tTime(s)\n"; printf( "%s\t\t", argv[ 1 ] ); printf( "%d\t\t\t\t", kernelSize ); printf( "%d\t\t\t\t", numberOfThreads ); size = sizeof( int ) * 3 * MAX_COL * MAX_ROW; //Declaring pointers int * d_in, * d_out; //Alloc memory cudaMalloc( (void **) &d_in, size ); cudaMalloc( (void **) &d_out, size ); //Initialize variables storeImageData(); //Copy host to device cudaMemcpy( d_in, &h_in, size, cudaMemcpyHostToDevice ); //Launch kernel //blur(); blur<<< NUMBER_OF_BLOCKS, numberOfThreads/NUMBER_OF_BLOCKS >>>( d_in, d_out, (image.rows + numberOfThreads - 1)/numberOfThreads, image.rows, image.cols, kernelSize ); //Copy device to host cudaMemcpy( &h_out, d_out, size, cudaMemcpyDeviceToHost ); //Free memory cudaFree( d_in ); cudaFree( d_out ); //Create newImage with the matrix out saveNewImageData(); //calculate and print elapsed time 
clock_gettime(CLOCK_MONOTONIC, &finish); elapsed = (finish.tv_sec - start.tv_sec); elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0; printf( "%.4f\n", elapsed ); //Display blurred image //displayImage( newImage ); return 0; }
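// Illustrative sketch (not part of the files above; kernel and parameter names are
// invented). The same wrap-around box blur written with one thread per output pixel
// and a 2D grid, instead of assigning each thread a contiguous range of rows.
__global__ void blurPerPixel(const int* in, int* out, int rows, int cols, int k)
{
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= rows || col >= cols) return;

  int sum[3] = {0, 0, 0};
  for (int i = row - k / 2; i <= row + k / 2; ++i) {
    for (int j = col - k / 2; j <= col + k / 2; ++j) {
      int r = (i + rows) % rows;  // wrap around the image edges, as in blur() above
      int c = (j + cols) % cols;
      for (int ch = 0; ch < 3; ++ch) sum[ch] += in[(r * cols + c) * 3 + ch];
    }
  }
  for (int ch = 0; ch < 3; ++ch) out[(row * cols + col) * 3 + ch] = sum[ch] / (k * k);
}
// Example launch: dim3 block(16, 16);
//                 dim3 grid((cols + 15) / 16, (rows + 15) / 16);
//                 blurPerPixel<<<grid, block>>>(d_in, d_out, rows, cols, kernelSize);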
8ade4d275ace36eacf5f6fd437726fd1fc867201.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void Finalize(int size, int *originIn, int *originOut, int *bestSeenIn, int *bestSeenOut, int *adjIndexes, int *adjacency, int *mis, int *incomplete) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int bestSeen = bestSeenIn[idx]; int origin = originIn[idx]; if (bestSeen < 1000001) { int start = adjIndexes[idx]; int end = adjIndexes[idx + 1]; // Look at all the neighbors and take best values: for (int i = start; i < end; i++) { int neighbor = adjacency[i]; unsigned int challenger = bestSeenIn[neighbor]; int challengerOrigin = originIn[neighbor]; if (challenger > 0 && challenger == bestSeen && challengerOrigin > origin) { origin = challengerOrigin; } if (challenger > bestSeen) { bestSeen = challenger; origin = challengerOrigin; } } } // Write new MIS status int misStatus = -1; if (origin == idx) misStatus = 1; else if (bestSeen == 1000001) misStatus = 0; mis[idx] = misStatus; // If this node is still unassigned mark if (misStatus == -1) { incomplete[0] = 1; } } }
8ade4d275ace36eacf5f6fd437726fd1fc867201.cu
#include "includes.h" __global__ void Finalize(int size, int *originIn, int *originOut, int *bestSeenIn, int *bestSeenOut, int *adjIndexes, int *adjacency, int *mis, int *incomplete) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int bestSeen = bestSeenIn[idx]; int origin = originIn[idx]; if (bestSeen < 1000001) { int start = adjIndexes[idx]; int end = adjIndexes[idx + 1]; // Look at all the neighbors and take best values: for (int i = start; i < end; i++) { int neighbor = adjacency[i]; unsigned int challenger = bestSeenIn[neighbor]; int challengerOrigin = originIn[neighbor]; if (challenger > 0 && challenger == bestSeen && challengerOrigin > origin) { origin = challengerOrigin; } if (challenger > bestSeen) { bestSeen = challenger; origin = challengerOrigin; } } } // Write new MIS status int misStatus = -1; if (origin == idx) misStatus = 1; else if (bestSeen == 1000001) misStatus = 0; mis[idx] = misStatus; // If this node is still unassigned mark if (misStatus == -1) { incomplete[0] = 1; } } }
92f733079080608a9ab86546d5570e7470df0d86.hip
// !!! This is a file automatically generated by hipify!!! /****************************************** INCLUDES ******************************************/ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include <cmath> #include "utils/cuda_utils.h" #include "utils/c_utils.h" #include "utils/consts.h" #include "tests/tests.h" #include "kernels/scan.h" #include "kernels/mask.h" #include "kernels/compressed_mask.h" #include "kernels/counts.h" #include "kernels/symbols.h" #include "rle/rle.h" /****************************************** MAIN FUNCTION ******************************************/ void run_tests() { test_scan(&cudaScan); test_mask(&cudaMask); test_compressed_mask(&cudaCompressedMask); test_counts(&cudaCounts); test_symbols(&cudaSymbols); } char* generate_data(int size, float compressability) { srand(time(0)); int alphabet_length = 26; char *data = (char*)_malloc(sizeof(char)*size); data[0] = 'a' + rand() % alphabet_length; for (int i = 1; i < size; i++) { float r = randf(); data[i] = (r <= compressability) ? (data[i - 1]) : ('a' + rand() % alphabet_length); } return data; } void measure_performance(void(*fun)(char*,int,char**,int**,int*),char* data, int size,char* label) { char *symbols; int length; int *runs; printf("\n%s\n", label); clock_t start = clock(); fun(data, size, &symbols, &runs, &length); clock_t end = clock(); float seconds = (float)(end - start) / CLOCKS_PER_SEC; printf("Time passed: %f s\n", seconds); printf("Output length: %d * 2\n", length); free(symbols); free(runs); } void run_comparison(int size, float compressability) { printf("Generating %d MB of data with %.2f %% compressability...\n", size/MB,compressability*100); char *data = generate_data(size,compressability); printf("Running performance tests...\n"); measure_performance(&parallel_rle, data, size,"GPU VERSION"); measure_performance(&cpu_rle, data, size,"CPU VERSION"); printf("\nDone\n"); free(data); } void read_arguments(int *sizeMb, float* compressability) { printf("Provide data size int megabytes (int):\n"); scanf("%d", sizeMb); printf("Provide compressability (float): \n"); scanf("%f", compressability); } int main() { int sizeMb; float compressability; run_tests(); read_arguments(&sizeMb, &compressability); run_comparison(sizeMb*MB,compressability); return EXIT_SUCCESS; }
92f733079080608a9ab86546d5570e7470df0d86.cu
/****************************************** INCLUDES ******************************************/ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include <cmath> #include "utils/cuda_utils.h" #include "utils/c_utils.h" #include "utils/consts.h" #include "tests/tests.h" #include "kernels/scan.h" #include "kernels/mask.h" #include "kernels/compressed_mask.h" #include "kernels/counts.h" #include "kernels/symbols.h" #include "rle/rle.h" /****************************************** MAIN FUNCTION ******************************************/ void run_tests() { test_scan(&cudaScan); test_mask(&cudaMask); test_compressed_mask(&cudaCompressedMask); test_counts(&cudaCounts); test_symbols(&cudaSymbols); } char* generate_data(int size, float compressability) { srand(time(0)); int alphabet_length = 26; char *data = (char*)_malloc(sizeof(char)*size); data[0] = 'a' + rand() % alphabet_length; for (int i = 1; i < size; i++) { float r = randf(); data[i] = (r <= compressability) ? (data[i - 1]) : ('a' + rand() % alphabet_length); } return data; } void measure_performance(void(*fun)(char*,int,char**,int**,int*),char* data, int size,char* label) { char *symbols; int length; int *runs; printf("\n%s\n", label); clock_t start = clock(); fun(data, size, &symbols, &runs, &length); clock_t end = clock(); float seconds = (float)(end - start) / CLOCKS_PER_SEC; printf("Time passed: %f s\n", seconds); printf("Output length: %d * 2\n", length); free(symbols); free(runs); } void run_comparison(int size, float compressability) { printf("Generating %d MB of data with %.2f %% compressability...\n", size/MB,compressability*100); char *data = generate_data(size,compressability); printf("Running performance tests...\n"); measure_performance(&parallel_rle, data, size,"GPU VERSION"); measure_performance(&cpu_rle, data, size,"CPU VERSION"); printf("\nDone\n"); free(data); } void read_arguments(int *sizeMb, float* compressability) { printf("Provide data size int megabytes (int):\n"); scanf("%d", sizeMb); printf("Provide compressability (float): \n"); scanf("%f", compressability); } int main() { int sizeMb; float compressability; run_tests(); read_arguments(&sizeMb, &compressability); run_comparison(sizeMb*MB,compressability); return EXIT_SUCCESS; }
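// Illustrative sketch (assumption: this matches the cpu_rle() interface declared in
// rle/rle.h, which is not shown; the function-pointer type used by measure_performance
// above fixes the signature). A straightforward host run-length encoder producing the
// parallel symbol/run arrays that the harness frees afterwards.
#include <cstdlib>

void cpu_rle_sketch(char* data, int size, char** symbols, int** runs, int* length)
{
  *symbols = (char*)malloc(size * sizeof(char));  // worst case: no repeated symbols
  *runs = (int*)malloc(size * sizeof(int));
  int out = 0;
  for (int i = 0; i < size; ) {
    int j = i;
    while (j < size && data[j] == data[i]) ++j;  // extend the current run
    (*symbols)[out] = data[i];
    (*runs)[out] = j - i;
    ++out;
    i = j;
  }
  *length = out;
}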
15a5f94c48b8bd3ec96833858c013a7157400e18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define N 4 // __global__ void MatAdd(int A[][N], int B[][N], int C[][N]){ int i = threadIdx.x; int j = threadIdx.y; C[i][j] = A[i][j] + B[i][j]; } int main(){ int A[N][N] = { {1, 5, 6, 7}, {4, 4, 8, 0}, {2, 3, 4, 5}, {2, 3, 4, 5} }; int B[N][N] = { {1, 5, 6, 7}, {4, 4, 8, 0}, {2, 3, 4, 5}, {2, 3, 4, 5} }; int C[N][N] = { {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0} }; int (*d_A)[N], (*d_B)[N], (*d_C)[N]; //Allocate memories for device copies of A,B,C hipMalloc((void**)&d_A, (N*N)*sizeof(int)); hipMalloc((void**)&d_B, (N*N)*sizeof(int)); hipMalloc((void**)&d_C, (N*N)*sizeof(int)); //Allocate space for host copies of A,B,C and setup input values hipMemcpy(d_A, A, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_B, B, (N*N)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_C, C, (N*N)*sizeof(int), hipMemcpyHostToDevice); //Launch MatAdd() kernel on GPU int numBlocks = 1; dim3 threadsPerBlock(N,N); hipLaunchKernelGGL(( MatAdd), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_A,d_B,d_C); //copy results back to host hipMemcpy(C, d_C, (N*N)*sizeof(int), hipMemcpyDeviceToHost); int i, j; printf("C = \n"); for(i=0;i<N;i++){ for(j=0;j<N;j++){ printf("%d ", C[i][j]); } printf("\n"); } //cleanup hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\n"); return 0; }
15a5f94c48b8bd3ec96833858c013a7157400e18.cu
#include <stdio.h> #include <stdlib.h> #define N 4 // __global__ void MatAdd(int A[][N], int B[][N], int C[][N]){ int i = threadIdx.x; int j = threadIdx.y; C[i][j] = A[i][j] + B[i][j]; } int main(){ int A[N][N] = { {1, 5, 6, 7}, {4, 4, 8, 0}, {2, 3, 4, 5}, {2, 3, 4, 5} }; int B[N][N] = { {1, 5, 6, 7}, {4, 4, 8, 0}, {2, 3, 4, 5}, {2, 3, 4, 5} }; int C[N][N] = { {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0} }; int (*d_A)[N], (*d_B)[N], (*d_C)[N]; //Allocate memories for device copies of A,B,C cudaMalloc((void**)&d_A, (N*N)*sizeof(int)); cudaMalloc((void**)&d_B, (N*N)*sizeof(int)); cudaMalloc((void**)&d_C, (N*N)*sizeof(int)); //Allocate space for host copies of A,B,C and setup input values cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_C, C, (N*N)*sizeof(int), cudaMemcpyHostToDevice); //Launch MatAdd() kernel on GPU int numBlocks = 1; dim3 threadsPerBlock(N,N); MatAdd<<<numBlocks,threadsPerBlock>>>(d_A,d_B,d_C); //copy results back to host cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost); int i, j; printf("C = \n"); for(i=0;i<N;i++){ for(j=0;j<N;j++){ printf("%d ", C[i][j]); } printf("\n"); } //cleanup cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\n"); return 0; }
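// Illustrative sketch (not part of the files above; names are invented, and the matrix
// is stored as a flat row-major array rather than int[N][N]). The single one-block
// launch above caps N at 32, because N*N threads per block may not exceed 1024; the
// same element-wise addition for larger matrices uses a 2D grid of blocks with bounds
// checks:
__global__ void MatAddTiled(const int* A, const int* B, int* C, int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < n && col < n) C[row * n + col] = A[row * n + col] + B[row * n + col];
}
// Example launch: dim3 block(16, 16);
//                 dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
//                 MatAddTiled<<<grid, block>>>(d_A, d_B, d_C, n);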
2e2e17c7c77ce81c35baf6015ca879a802062893.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "search.h" // hack for sorting kdtree at customizable depth, should exists better design static KDTree* kdtree; KDTree::KDTree():_dim(352), _axis(0),_num_elements(0){ kdtree = this; } /** \brief Construct kdtree in cpu **/ void KDTree::make_tree(const std::vector<pcl::SHOT352, Eigen::aligned_allocator<pcl::SHOT352>>& input) { std::vector<int> indices(input.size()); std::iota(indices.begin(), indices.end(), 0); Node root; root.axis = 0; root.search_begin = indices.begin(); root.search_end = indices.end(); std::vector<Node, Eigen::aligned_allocator<Node>>Nodes; Nodes.emplace_back(root); while(!Nodes.empty()){ Node curr = Nodes.back(); Nodes.pop_back(); if(curr.search_end > curr.search_begin + 1){ curr.id = _num_elements++; if (curr.parent != -1){ if (curr.isleft) tree[curr.parent].left = curr.id; else tree[curr.parent].right = curr.id; } Node left, right; _axis = curr.axis; if (!kdtree) kdtree = this; std::sort(curr.search_begin, curr.search_end, [&input](size_t i1, size_t i2){ return input[i1].descriptor[kdtree->_axis] < input[i2].descriptor[kdtree->_axis];}); auto mid = curr.search_begin + (curr.search_end - curr.search_begin)/2; curr.idx = *mid; if (mid - curr.search_begin > 0){ left.id = mid - curr.search_begin+ curr.id + 1; left.parent = curr.id; left.axis = (curr.axis + 1) % _dim; left.isleft = true; } if (curr.search_end - mid > 1){ right.axis = (curr.axis + 1) % _dim; right.id = curr.id + 1; right.parent = curr.id; } curr.left = left.id; curr.right = right.id; left.search_begin = curr.search_begin; left.search_end = mid; right.search_begin = mid + 1; right.search_end = curr.search_end; if (left.search_end > left.search_begin ) Nodes.emplace_back(left); if (right.search_end > right.search_begin) Nodes.emplace_back(right); tree.emplace_back(curr); } else if (curr.search_begin +1 == curr.search_end){ curr.id = _num_elements++; if (curr.parent != -1){ if (curr.isleft) tree[curr.parent].left = curr.id; else tree[curr.parent].right = curr.id; } curr.idx = *curr.search_begin; tree.emplace_back(curr); } } } /** \brief calculate L2 distance between descriptor **/ __device__ float descriptorDistance(const pcl::SHOT352& pt1, const pcl::SHOT352 &pt2){ const int desclen_ = 352; float dist = 0; for (int i = 0; i < desclen_; ++i){ float delta = pt1.descriptor[i] - pt2.descriptor[i]; dist += delta * delta; } return dist; } /** \brief find nearest neighbor with kdtree **/ __global__ void kernFindCorrespondence(int N, const Node* nodes, const pcl::SHOT352* input, const pcl::SHOT352* queries, int* indices, float* dist){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ pcl::SHOT352 query = queries[index]; int n_idx = nodes[0].idx; int n_closest = 0; int split_axis = nodes[0].axis; float d_closest = descriptorDistance( input[n_idx], query); int curr_node = query.descriptor[split_axis] > input[n_idx].descriptor[split_axis] ? nodes[0].right:nodes[0].left; bool explored = false; while(true){ while(curr_node != -1){ n_idx = nodes[curr_node].idx; split_axis = nodes[curr_node].axis; float distance = descriptorDistance(input[n_idx], query); if (distance < d_closest){ d_closest = distance; n_closest = curr_node; explored = false; } curr_node = query.descriptor[split_axis] > input[n_idx].descriptor[split_axis]? 
nodes[curr_node].right: nodes[curr_node].left; } if (explored ||nodes[n_closest].parent == -1){ break; } else{ // explore parents curr_node = nodes[n_closest].parent; n_idx = nodes[curr_node].idx; split_axis = nodes[curr_node].axis; float hyper_dist = query.descriptor[split_axis] - input[n_idx].descriptor[split_axis]; if (abs(hyper_dist) < d_closest){ explored = true; curr_node = hyper_dist > 0? nodes[curr_node].right:nodes[curr_node].left; }else{ break; } } } indices[index] = nodes[n_closest].idx; dist[index] = d_closest; } } __global__ void kernFindCorrespBF(int N, int n, const pcl::SHOT352* input, const pcl::SHOT352* queries, int* indices, float* dist){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ pcl::SHOT352 query = queries[index]; int n_closest = 0; float d_closest = descriptorDistance( input[0], query); for (int i = 1; i < n ; i++){ float d_curr = descriptorDistance( input[i], query); if (d_curr < d_closest){ d_closest = d_curr; n_closest = i; } } indices[index] = n_closest; dist[index] = d_closest; } } void Search::setInputCloud(const pcl::PointCloud<pcl::SHOT352>::ConstPtr &input) { _input = input; _N_input = static_cast<int>(input->points.size()); // _kdtree.make_tree(input->points); } void Search::bruteForce(const pcl::CorrespondencesPtr &model_scene_corrs){ if (!_search || !_input || _N_input == 0 || _N_search == 0){ std::cerr << "Search function not properly setup" << std::endl; exit(1); } assert(_N_input < _N_search); int *dev_neighbor_indices = NULL; pcl::SHOT352 *dev_search = NULL; pcl::SHOT352 *dev_input = NULL; float *dev_dist = NULL; std::vector<int> _neighbor_indices; std::vector<float> _neighbor_distances; // std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); hipMalloc((void**)&dev_neighbor_indices, _N_search * sizeof(int)); checkCUDAError("malloc dev_neighbor indices error"); hipMemset(dev_neighbor_indices, -1, _N_search * sizeof(int)); checkCUDAError("memset ni error"); hipMalloc((void**)&dev_input, _N_input * sizeof(pcl::SHOT352)); checkCUDAError("malloc dev_neighbor distances error"); hipMemcpy(dev_input, &(_input->points[0]), _N_input * sizeof(pcl::SHOT352), hipMemcpyHostToDevice); checkCUDAError("dev input memcpy error"); hipMalloc((void**)&dev_search, _N_search * sizeof(pcl::SHOT352)); checkCUDAError("malloc dps error"); hipMemcpy(dev_search, &(_search->points[0]), _N_search * sizeof(pcl::SHOT352), hipMemcpyHostToDevice); checkCUDAError("memcpy ps error"); hipMalloc((void**)&dev_dist, _N_search * sizeof(float)); checkCUDAError("dev_dist malloc"); dim3 fullBlockPerGrid_points (static_cast<u_int32_t >((_N_search + blockSize - 1)/blockSize)); hipLaunchKernelGGL(( kernFindCorrespBF), dim3(fullBlockPerGrid_points), dim3(blockSize), 0, 0, _N_search, _N_input, dev_input, dev_search, dev_neighbor_indices, dev_dist); checkCUDAError("KernSearchCorres error"); _neighbor_indices.resize(_N_search); hipMemcpy(&(_neighbor_indices[0]), dev_neighbor_indices, sizeof(int) * _N_search, hipMemcpyDeviceToHost); checkCUDAError("cudamemcpy num neigbors issue"); _neighbor_distances.resize(_N_search); hipMemcpy(&(_neighbor_distances[0]), dev_dist, sizeof(float) * _N_search, hipMemcpyDeviceToHost); for (int i = 0; i < _N_search; ++i){ if(isfinite(_neighbor_distances[i]) && _neighbor_distances[i] < 0.25f){ pcl::Correspondence corr (_neighbor_indices[i], i, _neighbor_distances[i]); model_scene_corrs->emplace_back(corr); } } // std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); // 
auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // std::cout << "GPU implementation bf corresp search takes: " << duration << std::endl; hipFree(dev_search); hipFree(dev_input); hipFree(dev_neighbor_indices); hipFree(dev_dist); // hipFree(dev_tree); checkCUDAError("cuda free search"); } //void Search::search(const pcl::CorrespondencesPtr &model_scene_corrs) { // if (!_search || !_input || _N_input == 0 || _N_search == 0){ // std::cerr << "Search function not properly setup" << std::endl; // exit(1); // } // // const std::vector<Node, Eigen::aligned_allocator<Node>>& tree = _kdtree.getTree(); // assert(_N_input == tree.size()); // // int *dev_neighbor_indices = NULL; // pcl::SHOT352 *dev_search = NULL; // pcl::SHOT352 *dev_input = NULL; // Node* dev_tree = NULL; // float *dev_dist = NULL; // std::vector<int> _neighbor_indices; // std::vector<float> _neighbor_distances; // // std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); // // hipMalloc((void**)&dev_tree, _N_input * sizeof(Node)); // checkCUDAError("cudamalloc dev tree error"); // hipMemcpy(dev_tree, &tree[0], _N_input * sizeof(Node), hipMemcpyHostToDevice); // checkCUDAError("cudammcpy dev_tree error"); // // hipMalloc((void**)&dev_neighbor_indices, _N_search * sizeof(int)); // checkCUDAError("malloc dev_neighbor indices error"); // hipMemset(dev_neighbor_indices, -1, _N_search * sizeof(int)); // checkCUDAError("memset ni error"); // // hipMalloc((void**)&dev_input, _N_input * sizeof(pcl::SHOT352)); // checkCUDAError("malloc dev_neighbor distances error"); // hipMemcpy(dev_input, &(_input->points[0]), _N_input * sizeof(pcl::SHOT352), hipMemcpyHostToDevice); // checkCUDAError("dev input memcpy error"); // // hipMalloc((void**)&dev_search, _N_search * sizeof(pcl::SHOT352)); // checkCUDAError("malloc dps error"); // hipMemcpy(dev_search, &(_search->points[0]), _N_search * sizeof(pcl::SHOT352), hipMemcpyHostToDevice); // checkCUDAError("memcpy ps error"); // // // hipMalloc((void**)&dev_dist, _N_search * sizeof(float)); // checkCUDAError("dev_dist malloc"); // // // dim3 fullBlockPerGrid_points (static_cast<u_int32_t >((_N_search + blockSize - 1)/blockSize)); // // kernFindCorrespondence<<<fullBlockPerGrid_points, blockSize>>>(_N_search, dev_tree, dev_input, dev_search, // dev_neighbor_indices, dev_dist); // checkCUDAError("KernSearchCorres error"); // // std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); // auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // std::cout << "GPU implementation kdtree corresp search takes: " << duration << std::endl; // // // _neighbor_indices.resize(_N_search); // hipMemcpy(&(_neighbor_indices[0]), dev_neighbor_indices, sizeof(int) * _N_search, hipMemcpyDeviceToHost); // checkCUDAError("cudamemcpy num neigbors issue"); // // _neighbor_distances.resize(_N_search); // hipMemcpy(&(_neighbor_distances[0]), dev_dist, sizeof(float) * _N_search, hipMemcpyDeviceToHost); // // // for (int i = 0; i < _N_search; ++i){ //// std:: cout << _neighbor_distances[i] << std::endl; // if(isfinite(_neighbor_distances[i]) && _neighbor_distances[i] < 0.25f){ // pcl::Correspondence corr (_neighbor_indices[i], i, _neighbor_distances[i]); // model_scene_corrs->emplace_back(corr); // } // } // // hipFree(dev_search); // hipFree(dev_input); // hipFree(dev_neighbor_indices); // hipFree(dev_dist); // hipFree(dev_tree); // checkCUDAError("cuda free search"); // //}
2e2e17c7c77ce81c35baf6015ca879a802062893.cu
#include "search.h" // hack for sorting kdtree at customizable depth, should exists better design static KDTree* kdtree; KDTree::KDTree():_dim(352), _axis(0),_num_elements(0){ kdtree = this; } /** \brief Construct kdtree in cpu **/ void KDTree::make_tree(const std::vector<pcl::SHOT352, Eigen::aligned_allocator<pcl::SHOT352>>& input) { std::vector<int> indices(input.size()); std::iota(indices.begin(), indices.end(), 0); Node root; root.axis = 0; root.search_begin = indices.begin(); root.search_end = indices.end(); std::vector<Node, Eigen::aligned_allocator<Node>>Nodes; Nodes.emplace_back(root); while(!Nodes.empty()){ Node curr = Nodes.back(); Nodes.pop_back(); if(curr.search_end > curr.search_begin + 1){ curr.id = _num_elements++; if (curr.parent != -1){ if (curr.isleft) tree[curr.parent].left = curr.id; else tree[curr.parent].right = curr.id; } Node left, right; _axis = curr.axis; if (!kdtree) kdtree = this; std::sort(curr.search_begin, curr.search_end, [&input](size_t i1, size_t i2){ return input[i1].descriptor[kdtree->_axis] < input[i2].descriptor[kdtree->_axis];}); auto mid = curr.search_begin + (curr.search_end - curr.search_begin)/2; curr.idx = *mid; if (mid - curr.search_begin > 0){ left.id = mid - curr.search_begin+ curr.id + 1; left.parent = curr.id; left.axis = (curr.axis + 1) % _dim; left.isleft = true; } if (curr.search_end - mid > 1){ right.axis = (curr.axis + 1) % _dim; right.id = curr.id + 1; right.parent = curr.id; } curr.left = left.id; curr.right = right.id; left.search_begin = curr.search_begin; left.search_end = mid; right.search_begin = mid + 1; right.search_end = curr.search_end; if (left.search_end > left.search_begin ) Nodes.emplace_back(left); if (right.search_end > right.search_begin) Nodes.emplace_back(right); tree.emplace_back(curr); } else if (curr.search_begin +1 == curr.search_end){ curr.id = _num_elements++; if (curr.parent != -1){ if (curr.isleft) tree[curr.parent].left = curr.id; else tree[curr.parent].right = curr.id; } curr.idx = *curr.search_begin; tree.emplace_back(curr); } } } /** \brief calculate L2 distance between descriptor **/ __device__ float descriptorDistance(const pcl::SHOT352& pt1, const pcl::SHOT352 &pt2){ const int desclen_ = 352; float dist = 0; for (int i = 0; i < desclen_; ++i){ float delta = pt1.descriptor[i] - pt2.descriptor[i]; dist += delta * delta; } return dist; } /** \brief find nearest neighbor with kdtree **/ __global__ void kernFindCorrespondence(int N, const Node* nodes, const pcl::SHOT352* input, const pcl::SHOT352* queries, int* indices, float* dist){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ pcl::SHOT352 query = queries[index]; int n_idx = nodes[0].idx; int n_closest = 0; int split_axis = nodes[0].axis; float d_closest = descriptorDistance( input[n_idx], query); int curr_node = query.descriptor[split_axis] > input[n_idx].descriptor[split_axis] ? nodes[0].right:nodes[0].left; bool explored = false; while(true){ while(curr_node != -1){ n_idx = nodes[curr_node].idx; split_axis = nodes[curr_node].axis; float distance = descriptorDistance(input[n_idx], query); if (distance < d_closest){ d_closest = distance; n_closest = curr_node; explored = false; } curr_node = query.descriptor[split_axis] > input[n_idx].descriptor[split_axis]? 
nodes[curr_node].right: nodes[curr_node].left; } if (explored ||nodes[n_closest].parent == -1){ break; } else{ // explore parents curr_node = nodes[n_closest].parent; n_idx = nodes[curr_node].idx; split_axis = nodes[curr_node].axis; float hyper_dist = query.descriptor[split_axis] - input[n_idx].descriptor[split_axis]; if (abs(hyper_dist) < d_closest){ explored = true; curr_node = hyper_dist > 0? nodes[curr_node].right:nodes[curr_node].left; }else{ break; } } } indices[index] = nodes[n_closest].idx; dist[index] = d_closest; } } __global__ void kernFindCorrespBF(int N, int n, const pcl::SHOT352* input, const pcl::SHOT352* queries, int* indices, float* dist){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N){ pcl::SHOT352 query = queries[index]; int n_closest = 0; float d_closest = descriptorDistance( input[0], query); for (int i = 1; i < n ; i++){ float d_curr = descriptorDistance( input[i], query); if (d_curr < d_closest){ d_closest = d_curr; n_closest = i; } } indices[index] = n_closest; dist[index] = d_closest; } } void Search::setInputCloud(const pcl::PointCloud<pcl::SHOT352>::ConstPtr &input) { _input = input; _N_input = static_cast<int>(input->points.size()); // _kdtree.make_tree(input->points); } void Search::bruteForce(const pcl::CorrespondencesPtr &model_scene_corrs){ if (!_search || !_input || _N_input == 0 || _N_search == 0){ std::cerr << "Search function not properly setup" << std::endl; exit(1); } assert(_N_input < _N_search); int *dev_neighbor_indices = NULL; pcl::SHOT352 *dev_search = NULL; pcl::SHOT352 *dev_input = NULL; float *dev_dist = NULL; std::vector<int> _neighbor_indices; std::vector<float> _neighbor_distances; // std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); cudaMalloc((void**)&dev_neighbor_indices, _N_search * sizeof(int)); checkCUDAError("malloc dev_neighbor indices error"); cudaMemset(dev_neighbor_indices, -1, _N_search * sizeof(int)); checkCUDAError("memset ni error"); cudaMalloc((void**)&dev_input, _N_input * sizeof(pcl::SHOT352)); checkCUDAError("malloc dev_neighbor distances error"); cudaMemcpy(dev_input, &(_input->points[0]), _N_input * sizeof(pcl::SHOT352), cudaMemcpyHostToDevice); checkCUDAError("dev input memcpy error"); cudaMalloc((void**)&dev_search, _N_search * sizeof(pcl::SHOT352)); checkCUDAError("malloc dps error"); cudaMemcpy(dev_search, &(_search->points[0]), _N_search * sizeof(pcl::SHOT352), cudaMemcpyHostToDevice); checkCUDAError("memcpy ps error"); cudaMalloc((void**)&dev_dist, _N_search * sizeof(float)); checkCUDAError("dev_dist malloc"); dim3 fullBlockPerGrid_points (static_cast<u_int32_t >((_N_search + blockSize - 1)/blockSize)); kernFindCorrespBF<<<fullBlockPerGrid_points, blockSize>>>(_N_search, _N_input, dev_input, dev_search, dev_neighbor_indices, dev_dist); checkCUDAError("KernSearchCorres error"); _neighbor_indices.resize(_N_search); cudaMemcpy(&(_neighbor_indices[0]), dev_neighbor_indices, sizeof(int) * _N_search, cudaMemcpyDeviceToHost); checkCUDAError("cudamemcpy num neigbors issue"); _neighbor_distances.resize(_N_search); cudaMemcpy(&(_neighbor_distances[0]), dev_dist, sizeof(float) * _N_search, cudaMemcpyDeviceToHost); for (int i = 0; i < _N_search; ++i){ if(isfinite(_neighbor_distances[i]) && _neighbor_distances[i] < 0.25f){ pcl::Correspondence corr (_neighbor_indices[i], i, _neighbor_distances[i]); model_scene_corrs->emplace_back(corr); } } // std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); // auto duration = 
std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // std::cout << "GPU implementation bf corresp search takes: " << duration << std::endl; cudaFree(dev_search); cudaFree(dev_input); cudaFree(dev_neighbor_indices); cudaFree(dev_dist); // cudaFree(dev_tree); checkCUDAError("cuda free search"); } //void Search::search(const pcl::CorrespondencesPtr &model_scene_corrs) { // if (!_search || !_input || _N_input == 0 || _N_search == 0){ // std::cerr << "Search function not properly setup" << std::endl; // exit(1); // } // // const std::vector<Node, Eigen::aligned_allocator<Node>>& tree = _kdtree.getTree(); // assert(_N_input == tree.size()); // // int *dev_neighbor_indices = NULL; // pcl::SHOT352 *dev_search = NULL; // pcl::SHOT352 *dev_input = NULL; // Node* dev_tree = NULL; // float *dev_dist = NULL; // std::vector<int> _neighbor_indices; // std::vector<float> _neighbor_distances; // // std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); // // cudaMalloc((void**)&dev_tree, _N_input * sizeof(Node)); // checkCUDAError("cudamalloc dev tree error"); // cudaMemcpy(dev_tree, &tree[0], _N_input * sizeof(Node), cudaMemcpyHostToDevice); // checkCUDAError("cudammcpy dev_tree error"); // // cudaMalloc((void**)&dev_neighbor_indices, _N_search * sizeof(int)); // checkCUDAError("malloc dev_neighbor indices error"); // cudaMemset(dev_neighbor_indices, -1, _N_search * sizeof(int)); // checkCUDAError("memset ni error"); // // cudaMalloc((void**)&dev_input, _N_input * sizeof(pcl::SHOT352)); // checkCUDAError("malloc dev_neighbor distances error"); // cudaMemcpy(dev_input, &(_input->points[0]), _N_input * sizeof(pcl::SHOT352), cudaMemcpyHostToDevice); // checkCUDAError("dev input memcpy error"); // // cudaMalloc((void**)&dev_search, _N_search * sizeof(pcl::SHOT352)); // checkCUDAError("malloc dps error"); // cudaMemcpy(dev_search, &(_search->points[0]), _N_search * sizeof(pcl::SHOT352), cudaMemcpyHostToDevice); // checkCUDAError("memcpy ps error"); // // // cudaMalloc((void**)&dev_dist, _N_search * sizeof(float)); // checkCUDAError("dev_dist malloc"); // // // dim3 fullBlockPerGrid_points (static_cast<u_int32_t >((_N_search + blockSize - 1)/blockSize)); // // kernFindCorrespondence<<<fullBlockPerGrid_points, blockSize>>>(_N_search, dev_tree, dev_input, dev_search, // dev_neighbor_indices, dev_dist); // checkCUDAError("KernSearchCorres error"); // // std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); // auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // std::cout << "GPU implementation kdtree corresp search takes: " << duration << std::endl; // // // _neighbor_indices.resize(_N_search); // cudaMemcpy(&(_neighbor_indices[0]), dev_neighbor_indices, sizeof(int) * _N_search, cudaMemcpyDeviceToHost); // checkCUDAError("cudamemcpy num neigbors issue"); // // _neighbor_distances.resize(_N_search); // cudaMemcpy(&(_neighbor_distances[0]), dev_dist, sizeof(float) * _N_search, cudaMemcpyDeviceToHost); // // // for (int i = 0; i < _N_search; ++i){ //// std:: cout << _neighbor_distances[i] << std::endl; // if(isfinite(_neighbor_distances[i]) && _neighbor_distances[i] < 0.25f){ // pcl::Correspondence corr (_neighbor_indices[i], i, _neighbor_distances[i]); // model_scene_corrs->emplace_back(corr); // } // } // // cudaFree(dev_search); // cudaFree(dev_input); // cudaFree(dev_neighbor_indices); // cudaFree(dev_dist); // cudaFree(dev_tree); // checkCUDAError("cuda free search"); // 
//}
335b7979b4bbef6a1a89d44161849fc895f96df1.hip
// !!! This is a file automatically generated by hipify!!! #include "ManagePhotonAbsorption.h" #include <hip/hip_runtime.h> #include <iostream> #include <hiprand/hiprand_kernel.h> #include <hip/hip_cooperative_groups.h> #include <vector> #include "MyCudaToolkit.h" using namespace std; namespace cg = cooperative_groups; __device__ unsigned int reduce_sum(long in, cg::thread_block cta) { extern __shared__ long sdata[]; // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; if (in == -1) printf("error: countflag not initialzed\n"); cg::sync(cta); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } cg::sync(cta); } return sdata[0]; } /// <summary> /// kernel function for simulation /// For every depth, do the loop #(photonnum/(blockdim*griddim)) times, calculate photon absorption, store results into parameter count; /// Then continue the loop for next depth. /// </summary> /// <param name="count"></param> Array for returning results /// <param name="depthbin_ary"></param> Incident photon depths (stored as bin id of the depth) /// <param name="depthsize"></param> Total number of photon sets /// <param name="anglebinsize"></param> Total bin number of angle /// <param name="lut"></param> Pointer to the lut data /// <param name="photon_num"></param> Photon number for each sets. /// <param name="rndstates"></param> Random number generater /// <param name="seed"></param> Random number seed /// <returns></returns> __global__ void SimulatePhotonAbsorption(long* count, int* depthbin_ary, int depthsize, int anglebinsize, double* lut, long* photon_num, hiprandStateXORWOW_t* rndstates, unsigned int seed) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // Determine thread ID int bid = blockIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; int local_tid = threadIdx.x; int step = blockDim.x * gridDim.x; // Initialise the RNG hiprand_init(seed, tid, 0, &rndstates[tid]); hiprandState_t localState = rndstates[tid]; //begin simulation for each depth for (int depthid = 0; depthid < depthsize; depthid++) { unsigned int countflag = 0; for (unsigned photonid = tid; photonid < photon_num[depthid]; photonid += step) { int anglebin = (int)(anglebinsize * hiprand_uniform_double(&localState)); double prob = lut[depthbin_ary[depthid] * anglebinsize + anglebin]; double rndm = hiprand_uniform_double(&localState); if (rndm < prob) countflag++; } countflag = reduce_sum(countflag, cta); if (threadIdx.x == 0) { count[bid + depthid * gridDim.x] = countflag; } } } /************************************************** * Calculate number of photon absorbed for given depth & incident photon number * ( If rndmseed==0, rndmseed=time(nullptr) ) **************************************************/ vector<long> ManagePhotonAbsorption::getAbsorbedPhotonNum(vector<double> depth, vector<long> incident_photon_num, unsigned int rndmseed) { unsigned int m_device = 0; struct hipDeviceProp_t deviceProperties; CHECK(hipGetDeviceProperties(&deviceProperties, m_device)); CHECK(hipSetDevice(m_device)); vector<int> lut_size = look_up_table->getLUTSize(); //Distribute depth array into given depth bins vector<int> depth_in_bin; //The last entry for lut_size is num of bins for depth int depthbinnum = lut_size[0]; for (int i = 0; i < depth.size(); i++) { int tmp = (int)((depth[i] - min_depth) / (max_depth - min_depth) * depthbinnum); depth_in_bin.push_back(tmp); } //Memory 
allocation in GPU for photonnum and depth_in_bin transStart = cpuSecond(); long* d_photonnum; CHECK(hipMalloc((void**)&d_photonnum, depth.size() * sizeof(long))); CHECK(hipMemcpy((void*)d_photonnum, (void*)&(incident_photon_num[0]), depth.size() * sizeof(long), hipMemcpyHostToDevice)); int* d_depth_in_bin; CHECK(hipMalloc((void**)&d_depth_in_bin, depth.size() * sizeof(int))); CHECK(hipMemcpy((void*)d_depth_in_bin, (void*)&(depth_in_bin[0]), depth.size() * sizeof(int), hipMemcpyHostToDevice)); transElaps += (cpuSecond() - transStart); //DepthCnt_ary: count absorbed photon for each depth long* h_DepthCnt_ary, * d_DepthCnt_ary; //kernel function setup ( numSMs==30 ) dim3 block, grid; block.x = threadBlockSize; grid.x = 0; grid.x = (incident_photon_num[0] - 1) / block.x + 1; unsigned int blocksPerSM = 10; unsigned int numSMs = deviceProperties.multiProcessorCount; while (grid.x > 2 * blocksPerSM * numSMs){ grid.x >>= 1; } size_t count_array_size = grid.x * depth.size() * sizeof(long); CHECK(hipMalloc((void**)&d_DepthCnt_ary, count_array_size)); h_DepthCnt_ary = (long*)malloc(count_array_size); //Random number simulation if (totalThreadNum < grid.x * block.x) { totalThreadNum = grid.x * block.x; transStart = cpuSecond(); CHECK(hipMalloc((void**)&states, sizeof(hiprandStateXORWOW_t) * block.x * grid.x)); transElaps += (cpuSecond() - transStart); } if (rndmseed == 0) rndmseed = time(nullptr); kernelStart = cpuSecond(); SimulatePhotonAbsorption << <grid, block, block.x * sizeof(long) >> > (d_DepthCnt_ary, d_depth_in_bin, depth.size(), lut_size[1], d_lut, d_photonnum, states, rndmseed ); kernelElaps += (kernelStart - cpuSecond()); hipError_t cudaStatus = hipGetLastError(); CHECK(cudaStatus); //collect results transStart = cpuSecond(); CHECK(hipMemcpy(h_DepthCnt_ary, d_DepthCnt_ary, count_array_size, hipMemcpyDeviceToHost)); transElaps += (cpuSecond() - transStart); vector<long> absorbcnt; for (int i = 0; i < depth.size(); i++) { int tmpcnt = 0; for (int j = 0; j < grid.x; j++) { tmpcnt += h_DepthCnt_ary[i * grid.x + j]; } absorbcnt.push_back(tmpcnt); } //Free storage CHECK(hipFree(d_DepthCnt_ary)); free(h_DepthCnt_ary); CHECK(hipFree(d_depth_in_bin)); return absorbcnt; } ManagePhotonAbsorption::ManagePhotonAbsorption(LUT* lut, double maxdepth, double mindepth, int blocksize) : look_up_table(lut), max_depth(maxdepth), min_depth(mindepth), threadBlockSize(blocksize), totalThreadNum(0), kernelStart(0), kernelElaps(0), transStart(0), transElaps(0){ const vector<double>* h_lut_ptr = look_up_table->getLUTAddress(); vector<int> lut_size = look_up_table->getLUTSize(); int lut_total_size = 1; for (int i = 0; i < lut_size.size(); i++) lut_total_size *= lut_size[i]; transStart = cpuSecond(); CHECK(hipMalloc((void**)&d_lut, lut_total_size * sizeof(double))); CHECK(hipMemcpy((void*)d_lut, &((*h_lut_ptr)[0]), lut_total_size * sizeof(double), hipMemcpyHostToDevice)); transElaps += (cpuSecond() - transStart); } void ManagePhotonAbsorption::PrintTimeConsume() { printf("Total time consumption for CPU-GPU transfer: %f s\n", transElaps); printf("Total time consumption for kernel function: %f s\n", kernelElaps); }
335b7979b4bbef6a1a89d44161849fc895f96df1.cu
#include "ManagePhotonAbsorption.h" #include <cuda_runtime.h> #include <iostream> #include <curand_kernel.h> #include <cooperative_groups.h> #include <vector> #include "MyCudaToolkit.h" using namespace std; namespace cg = cooperative_groups; __device__ unsigned int reduce_sum(long in, cg::thread_block cta) { extern __shared__ long sdata[]; // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; if (in == -1) printf("error: countflag not initialzed\n"); cg::sync(cta); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } cg::sync(cta); } return sdata[0]; } /// <summary> /// kernel function for simulation /// For every depth, do the loop #(photonnum/(blockdim*griddim)) times, calculate photon absorption, store results into parameter count; /// Then continue the loop for next depth. /// </summary> /// <param name="count"></param> Array for returning results /// <param name="depthbin_ary"></param> Incident photon depths (stored as bin id of the depth) /// <param name="depthsize"></param> Total number of photon sets /// <param name="anglebinsize"></param> Total bin number of angle /// <param name="lut"></param> Pointer to the lut data /// <param name="photon_num"></param> Photon number for each sets. /// <param name="rndstates"></param> Random number generater /// <param name="seed"></param> Random number seed /// <returns></returns> __global__ void SimulatePhotonAbsorption(long* count, int* depthbin_ary, int depthsize, int anglebinsize, double* lut, long* photon_num, curandStateXORWOW_t* rndstates, unsigned int seed) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // Determine thread ID int bid = blockIdx.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; int local_tid = threadIdx.x; int step = blockDim.x * gridDim.x; // Initialise the RNG curand_init(seed, tid, 0, &rndstates[tid]); curandState localState = rndstates[tid]; //begin simulation for each depth for (int depthid = 0; depthid < depthsize; depthid++) { unsigned int countflag = 0; for (unsigned photonid = tid; photonid < photon_num[depthid]; photonid += step) { int anglebin = (int)(anglebinsize * curand_uniform_double(&localState)); double prob = lut[depthbin_ary[depthid] * anglebinsize + anglebin]; double rndm = curand_uniform_double(&localState); if (rndm < prob) countflag++; } countflag = reduce_sum(countflag, cta); if (threadIdx.x == 0) { count[bid + depthid * gridDim.x] = countflag; } } } /************************************************** * Calculate number of photon absorbed for given depth & incident photon number * ( If rndmseed==0, rndmseed=time(nullptr) ) **************************************************/ vector<long> ManagePhotonAbsorption::getAbsorbedPhotonNum(vector<double> depth, vector<long> incident_photon_num, unsigned int rndmseed) { unsigned int m_device = 0; struct cudaDeviceProp deviceProperties; CHECK(cudaGetDeviceProperties(&deviceProperties, m_device)); CHECK(cudaSetDevice(m_device)); vector<int> lut_size = look_up_table->getLUTSize(); //Distribute depth array into given depth bins vector<int> depth_in_bin; //The last entry for lut_size is num of bins for depth int depthbinnum = lut_size[0]; for (int i = 0; i < depth.size(); i++) { int tmp = (int)((depth[i] - min_depth) / (max_depth - min_depth) * depthbinnum); depth_in_bin.push_back(tmp); } //Memory allocation in GPU for photonnum and depth_in_bin transStart = cpuSecond(); long* 
d_photonnum; CHECK(cudaMalloc((void**)&d_photonnum, depth.size() * sizeof(long))); CHECK(cudaMemcpy((void*)d_photonnum, (void*)&(incident_photon_num[0]), depth.size() * sizeof(long), cudaMemcpyHostToDevice)); int* d_depth_in_bin; CHECK(cudaMalloc((void**)&d_depth_in_bin, depth.size() * sizeof(int))); CHECK(cudaMemcpy((void*)d_depth_in_bin, (void*)&(depth_in_bin[0]), depth.size() * sizeof(int), cudaMemcpyHostToDevice)); transElaps += (cpuSecond() - transStart); //DepthCnt_ary: count absorbed photon for each depth long* h_DepthCnt_ary, * d_DepthCnt_ary; //kernel function setup ( numSMs==30 ) dim3 block, grid; block.x = threadBlockSize; grid.x = 0; grid.x = (incident_photon_num[0] - 1) / block.x + 1; unsigned int blocksPerSM = 10; unsigned int numSMs = deviceProperties.multiProcessorCount; while (grid.x > 2 * blocksPerSM * numSMs){ grid.x >>= 1; } size_t count_array_size = grid.x * depth.size() * sizeof(long); CHECK(cudaMalloc((void**)&d_DepthCnt_ary, count_array_size)); h_DepthCnt_ary = (long*)malloc(count_array_size); //Random number simulation if (totalThreadNum < grid.x * block.x) { totalThreadNum = grid.x * block.x; transStart = cpuSecond(); CHECK(cudaMalloc((void**)&states, sizeof(curandStateXORWOW_t) * block.x * grid.x)); transElaps += (cpuSecond() - transStart); } if (rndmseed == 0) rndmseed = time(nullptr); kernelStart = cpuSecond(); SimulatePhotonAbsorption << <grid, block, block.x * sizeof(long) >> > (d_DepthCnt_ary, d_depth_in_bin, depth.size(), lut_size[1], d_lut, d_photonnum, states, rndmseed ); kernelElaps += (kernelStart - cpuSecond()); cudaError_t cudaStatus = cudaGetLastError(); CHECK(cudaStatus); //collect results transStart = cpuSecond(); CHECK(cudaMemcpy(h_DepthCnt_ary, d_DepthCnt_ary, count_array_size, cudaMemcpyDeviceToHost)); transElaps += (cpuSecond() - transStart); vector<long> absorbcnt; for (int i = 0; i < depth.size(); i++) { int tmpcnt = 0; for (int j = 0; j < grid.x; j++) { tmpcnt += h_DepthCnt_ary[i * grid.x + j]; } absorbcnt.push_back(tmpcnt); } //Free storage CHECK(cudaFree(d_DepthCnt_ary)); free(h_DepthCnt_ary); CHECK(cudaFree(d_depth_in_bin)); return absorbcnt; } ManagePhotonAbsorption::ManagePhotonAbsorption(LUT* lut, double maxdepth, double mindepth, int blocksize) : look_up_table(lut), max_depth(maxdepth), min_depth(mindepth), threadBlockSize(blocksize), totalThreadNum(0), kernelStart(0), kernelElaps(0), transStart(0), transElaps(0){ const vector<double>* h_lut_ptr = look_up_table->getLUTAddress(); vector<int> lut_size = look_up_table->getLUTSize(); int lut_total_size = 1; for (int i = 0; i < lut_size.size(); i++) lut_total_size *= lut_size[i]; transStart = cpuSecond(); CHECK(cudaMalloc((void**)&d_lut, lut_total_size * sizeof(double))); CHECK(cudaMemcpy((void*)d_lut, &((*h_lut_ptr)[0]), lut_total_size * sizeof(double), cudaMemcpyHostToDevice)); transElaps += (cpuSecond() - transStart); } void ManagePhotonAbsorption::PrintTimeConsume() { printf("Total time consumption for CPU-GPU transfer: %f s\n", transElaps); printf("Total time consumption for kernel function: %f s\n", kernelElaps); }
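// A note on the kernel timing in getAbsorbedPhotonNum above: the code accumulates
// kernelElaps += (kernelStart - cpuSecond()), which is start minus end and therefore
// negative, and the launch is asynchronous, so without a synchronization only the
// launch overhead would be captured in any case. A minimal sketch of the intended
// pattern; wallSeconds() is a hypothetical stand-in for cpuSecond() from
// MyCudaToolkit.h (assumed to return seconds as a double).

#include <chrono>
#include <cuda_runtime.h>

static double wallSeconds()
{
    return std::chrono::duration<double>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
}

// Usage around an asynchronous kernel launch:
//   double t0 = wallSeconds();
//   SimulatePhotonAbsorption<<<grid, block, block.x * sizeof(long)>>>(/* ... */);
//   cudaDeviceSynchronize();              // wait for the kernel to finish
//   kernelElaps += wallSeconds() - t0;    // end - start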
c95546c7d47163e1d63a504e5f689b1391e132d3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019-2020, NVIDIA CORPORATION. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /////////////////////////////////////////////////////////////////////////////// // LOMBSCARGLE // /////////////////////////////////////////////////////////////////////////////// /* import cupy as cp import matplotlib.pyplot as plt First define some input parameters for the signal: A = 2. w = 1. phi = 0.5 * cp.pi nin = 10000 nout = 1000000 r = cp.random.rand(nin) x = cp.linspace(0.01, 10*cp.pi, nin) Plot a sine wave for the selected times: y = A * cp.sin(w*x+phi) Define the array of frequencies for which to compute the periodogram: f = cp.linspace(0.01, 10, nout) Calculate Lomb-Scargle periodogram: pgram = cusignal.lombscargle(x, y, f, normalize=True) */ #include <math.h> #include <stdlib.h> #include <stdio.h> #include <chrono> #include <hip/hip_runtime.h> __global__ void lombscargle( const int x_shape, const int freqs_shape, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ freqs, float *__restrict__ pgram, const float y_dot ) { const int tx = ( blockIdx.x * blockDim.x + threadIdx.x ) ; const int stride = ( blockDim.x * gridDim.x ) ; for ( int tid = tx; tid < freqs_shape; tid += stride ) { float freq = freqs[tid] ; float xc = 0; float xs = 0; float cc = 0; float ss = 0; float cs = 0; float c; float s; for ( int j = 0; j < x_shape; j++ ) { sincosf( freq * x[j], &s, &c ); xc += y[j] * c; xs += y[j] * s; cc += c * c; ss += s * s; cs += c * s; } float c_tau; float s_tau; float tau = atan2f( 2.0f * cs, cc - ss ) / ( 2.0f * freq ) ; sincosf( freq * tau, &s_tau, &c_tau ); float c_tau2 = c_tau * c_tau ; float s_tau2 = s_tau * s_tau ; float cs_tau = 2.0f * c_tau * s_tau ; pgram[tid] = ( 0.5f * ( ( ( c_tau * xc + s_tau * xs ) * ( c_tau * xc + s_tau * xs ) / ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) + ( ( c_tau * xs - s_tau * xc ) * ( c_tau * xs - s_tau * xc ) / ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) * y_dot; } } void lombscargle_cpu( const int x_shape, const int freqs_shape, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ freqs, float *__restrict__ pgram, const float y_dot ) { for ( int tid = 0; tid < freqs_shape; tid ++) { float freq = freqs[tid] ; float xc = 0; float xs = 0; float cc = 0; float ss = 0; float cs = 0; float c; float s; for ( int j = 0; j < x_shape; j++ ) { sincosf( freq * x[j], &s, &c ); xc += y[j] * c; xs += y[j] * s; cc += c * c; ss += s * s; cs += c * s; } float c_tau; float s_tau; float tau = atan2f( 2.0f * cs, cc - ss ) / ( 2.0f * freq ) ; sincosf( freq * tau, &s_tau, &c_tau ); float c_tau2 = c_tau * c_tau ; float s_tau2 = s_tau * s_tau ; float cs_tau = 2.0f * c_tau * s_tau ; pgram[tid] = ( 0.5f * ( ( ( c_tau * xc + s_tau * xs ) * ( c_tau * xc + s_tau * xs ) / ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) + ( ( c_tau * xs - s_tau * xc ) * ( c_tau * xs - s_tau * xc ) / ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) * y_dot; } } int main(int argc, 
char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); const int x_shape = 1000; const int freqs_shape = 100000; const float A = 2.f; const float w = 1.0f; const float phi = 1.57f; float* x = (float*) malloc (sizeof(float)*x_shape); float* y = (float*) malloc (sizeof(float)*x_shape); float* f = (float*) malloc (sizeof(float)*freqs_shape); float* p = (float*) malloc (sizeof(float)*freqs_shape); float* p2 = (float*) malloc (sizeof(float)*freqs_shape); for (int i = 0; i < x_shape; i++) x[i] = 0.01f + i*(31.4f - 0.01f)/x_shape; for (int i = 0; i < x_shape; i++) y[i] = A * sinf(w*x[i]+phi); for (int i = 0; i < freqs_shape; i++) f[i] = 0.01f + i*(10.f-0.01f)/freqs_shape; const float y_dot = 2.0f/1.5f; float* d_x; float* d_y; float* d_f; float* d_p; hipMalloc((void**)&d_x, sizeof(float)*x_shape); hipMalloc((void**)&d_y, sizeof(float)*x_shape); hipMalloc((void**)&d_f, sizeof(float)*freqs_shape); hipMalloc((void**)&d_p, sizeof(float)*freqs_shape); hipMemcpy(d_x, x, sizeof(float)*x_shape, hipMemcpyHostToDevice); hipMemcpy(d_y, y, sizeof(float)*x_shape, hipMemcpyHostToDevice); hipMemcpy(d_f, f, sizeof(float)*freqs_shape, hipMemcpyHostToDevice); dim3 grids ((freqs_shape + 255)/256*256); dim3 threads (256); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int n = 0; n < repeat; n++) hipLaunchKernelGGL(( lombscargle), dim3(grids), dim3(threads), 0, 0, x_shape, freqs_shape, d_x, d_y, d_f, d_p, y_dot); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (us)\n", (time * 1e-3) / repeat); hipMemcpy(p, d_p, sizeof(float)*freqs_shape, hipMemcpyDeviceToHost); // verification lombscargle_cpu(x_shape, freqs_shape, x, y, f, p2, y_dot); bool error = false; for (int i = 0; i < freqs_shape; i++) { if (fabsf(p[i]-p2[i]) > 1e-3f) { printf("%.3f %.3f\n", p[i], p2[i]); error = true; break; } } printf("%s\n", error ? "FAIL" : "PASS"); hipFree(d_x); hipFree(d_y); hipFree(d_f); hipFree(d_p); free(x); free(y); free(f); free(p); free(p2); return 0; }
c95546c7d47163e1d63a504e5f689b1391e132d3.cu
// Copyright (c) 2019-2020, NVIDIA CORPORATION. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /////////////////////////////////////////////////////////////////////////////// // LOMBSCARGLE // /////////////////////////////////////////////////////////////////////////////// /* import cupy as cp import matplotlib.pyplot as plt First define some input parameters for the signal: A = 2. w = 1. phi = 0.5 * cp.pi nin = 10000 nout = 1000000 r = cp.random.rand(nin) x = cp.linspace(0.01, 10*cp.pi, nin) Plot a sine wave for the selected times: y = A * cp.sin(w*x+phi) Define the array of frequencies for which to compute the periodogram: f = cp.linspace(0.01, 10, nout) Calculate Lomb-Scargle periodogram: pgram = cusignal.lombscargle(x, y, f, normalize=True) */ #include <math.h> #include <stdlib.h> #include <stdio.h> #include <chrono> #include <cuda.h> __global__ void lombscargle( const int x_shape, const int freqs_shape, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ freqs, float *__restrict__ pgram, const float y_dot ) { const int tx = ( blockIdx.x * blockDim.x + threadIdx.x ) ; const int stride = ( blockDim.x * gridDim.x ) ; for ( int tid = tx; tid < freqs_shape; tid += stride ) { float freq = freqs[tid] ; float xc = 0; float xs = 0; float cc = 0; float ss = 0; float cs = 0; float c; float s; for ( int j = 0; j < x_shape; j++ ) { sincosf( freq * x[j], &s, &c ); xc += y[j] * c; xs += y[j] * s; cc += c * c; ss += s * s; cs += c * s; } float c_tau; float s_tau; float tau = atan2f( 2.0f * cs, cc - ss ) / ( 2.0f * freq ) ; sincosf( freq * tau, &s_tau, &c_tau ); float c_tau2 = c_tau * c_tau ; float s_tau2 = s_tau * s_tau ; float cs_tau = 2.0f * c_tau * s_tau ; pgram[tid] = ( 0.5f * ( ( ( c_tau * xc + s_tau * xs ) * ( c_tau * xc + s_tau * xs ) / ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) + ( ( c_tau * xs - s_tau * xc ) * ( c_tau * xs - s_tau * xc ) / ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) * y_dot; } } void lombscargle_cpu( const int x_shape, const int freqs_shape, const float *__restrict__ x, const float *__restrict__ y, const float *__restrict__ freqs, float *__restrict__ pgram, const float y_dot ) { for ( int tid = 0; tid < freqs_shape; tid ++) { float freq = freqs[tid] ; float xc = 0; float xs = 0; float cc = 0; float ss = 0; float cs = 0; float c; float s; for ( int j = 0; j < x_shape; j++ ) { sincosf( freq * x[j], &s, &c ); xc += y[j] * c; xs += y[j] * s; cc += c * c; ss += s * s; cs += c * s; } float c_tau; float s_tau; float tau = atan2f( 2.0f * cs, cc - ss ) / ( 2.0f * freq ) ; sincosf( freq * tau, &s_tau, &c_tau ); float c_tau2 = c_tau * c_tau ; float s_tau2 = s_tau * s_tau ; float cs_tau = 2.0f * c_tau * s_tau ; pgram[tid] = ( 0.5f * ( ( ( c_tau * xc + s_tau * xs ) * ( c_tau * xc + s_tau * xs ) / ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) + ( ( c_tau * xs - s_tau * xc ) * ( c_tau * xs - s_tau * xc ) / ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) * y_dot; } } int main(int argc, char* argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", 
argv[0]); return 1; } const int repeat = atoi(argv[1]); const int x_shape = 1000; const int freqs_shape = 100000; const float A = 2.f; const float w = 1.0f; const float phi = 1.57f; float* x = (float*) malloc (sizeof(float)*x_shape); float* y = (float*) malloc (sizeof(float)*x_shape); float* f = (float*) malloc (sizeof(float)*freqs_shape); float* p = (float*) malloc (sizeof(float)*freqs_shape); float* p2 = (float*) malloc (sizeof(float)*freqs_shape); for (int i = 0; i < x_shape; i++) x[i] = 0.01f + i*(31.4f - 0.01f)/x_shape; for (int i = 0; i < x_shape; i++) y[i] = A * sinf(w*x[i]+phi); for (int i = 0; i < freqs_shape; i++) f[i] = 0.01f + i*(10.f-0.01f)/freqs_shape; const float y_dot = 2.0f/1.5f; float* d_x; float* d_y; float* d_f; float* d_p; cudaMalloc((void**)&d_x, sizeof(float)*x_shape); cudaMalloc((void**)&d_y, sizeof(float)*x_shape); cudaMalloc((void**)&d_f, sizeof(float)*freqs_shape); cudaMalloc((void**)&d_p, sizeof(float)*freqs_shape); cudaMemcpy(d_x, x, sizeof(float)*x_shape, cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, sizeof(float)*x_shape, cudaMemcpyHostToDevice); cudaMemcpy(d_f, f, sizeof(float)*freqs_shape, cudaMemcpyHostToDevice); dim3 grids ((freqs_shape + 255)/256*256); dim3 threads (256); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int n = 0; n < repeat; n++) lombscargle<<<grids, threads>>>(x_shape, freqs_shape, d_x, d_y, d_f, d_p, y_dot); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (us)\n", (time * 1e-3) / repeat); cudaMemcpy(p, d_p, sizeof(float)*freqs_shape, cudaMemcpyDeviceToHost); // verification lombscargle_cpu(x_shape, freqs_shape, x, y, f, p2, y_dot); bool error = false; for (int i = 0; i < freqs_shape; i++) { if (fabsf(p[i]-p2[i]) > 1e-3f) { printf("%.3f %.3f\n", p[i], p2[i]); error = true; break; } } printf("%s\n", error ? "FAIL" : "PASS"); cudaFree(d_x); cudaFree(d_y); cudaFree(d_f); cudaFree(d_p); free(x); free(y); free(f); free(p); free(p2); return 0; }
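// A note on the launch configuration in the lombscargle pair above:
// dim3 grids((freqs_shape + 255)/256*256) rounds the *block count* up to a multiple
// of 256, so 256 times as many blocks are launched as there are outputs to cover.
// The grid-stride loop in the kernel keeps the result correct, but one thread per
// frequency is sufficient. A sketch of the tighter launch, reusing the names from
// main above (not self-contained on its own):

const int threadsPerBlock = 256;
dim3 threads(threadsPerBlock);
dim3 grids((freqs_shape + threadsPerBlock - 1) / threadsPerBlock);  // ceil-divide: one thread per frequency
lombscargle<<<grids, threads>>>(x_shape, freqs_shape, d_x, d_y, d_f, d_p, y_dot);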
848e206bcc8a99762662faa936171d9e1f25b5d8.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <pthread.h> #include <cstdlib> #include <vector> #define COMPRESSION_BATCH_SIZE 32 using namespace std; struct ThreadArg { float *original_data; long num_elements; int thread_num; float ***compressed_data; bool **compressed_data_taken; unsigned int *mask; }; struct CompressedPos { long compressed_data_batch; long offset; }; int n_threads = 8; int n_compressed_data_batches = 8; long layer_sizes_alexnet[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256, 13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384, 13l * 13 * 256, 6l * 6 * 256}; bool layer_compress_alexnet[] = {true, true, true, true, true, true, true, true}; long layer_density_alexnet[] = {50, 80, 40, 60, 70, 70, 30, 60}; int num_layers_alexnet = 8; long layer_sizes_vgg[] = {224l * 224 * 64, 224l * 224 * 64, 112l * 112 * 64, 112l * 112 * 128, 112l * 112 * 128, 56l * 56 * 128, 56l * 56 * 256, 56l * 56 * 256, 56l * 56 * 256, 28l * 28 * 256, 28l * 28 * 512, 28l * 28 * 512, 28l * 28 * 512, 14l * 14 * 512, 14l * 14 * 512, 14l * 14 * 512, 14l * 14 * 512, 7l * 7 * 512}; long layer_density_vgg[] = {50, 20, 30, 20, 10, 20, 20, 20, 10, 20, 20, 10, 10, 10, 20, 20, 10, 15 }; bool layer_compress_vgg[] = {true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true}; int num_layers_vgg = 18; // long *layer_sizes = layer_sizes_alexnet; // bool *layer_compress = layer_compress_alexnet; // long *layer_density = layer_density_alexnet; // int num_layers = num_layers_alexnet; long *layer_sizes = layer_sizes_alexnet; bool *layer_compress = layer_compress_alexnet; long *layer_density = layer_density_alexnet; int num_layers = num_layers_alexnet; void *compressThread(void *arg) { ThreadArg *thread_arg = (ThreadArg *)arg; float *original_data = thread_arg->original_data; float ***compressed_data = thread_arg->compressed_data; bool **compressed_data_taken = thread_arg->compressed_data_taken; unsigned int *mask = thread_arg->mask; int thread_num = thread_arg->thread_num; long num_elements = thread_arg->num_elements; long start = thread_num * num_elements / n_threads; long n_compression_batches = num_elements / n_threads / COMPRESSION_BATCH_SIZE; long compressed_data_batch_size = num_elements / n_threads / n_compressed_data_batches; hipHostMalloc((void **)&compressed_data[thread_num], n_compressed_data_batches * sizeof(float *)); hipHostMalloc((void **)&compressed_data_taken[thread_num], n_compressed_data_batches * sizeof(bool)); for (int i = 0; i < n_compressed_data_batches; i++) { compressed_data_taken[thread_num][i] = false; } CompressedPos current_pos; current_pos.compressed_data_batch = -1, current_pos.offset = compressed_data_batch_size; for (long i = 0; i < n_compression_batches; i++) { long mask_pos = (i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE; mask[mask_pos] = 0; for (long j = i * COMPRESSION_BATCH_SIZE + start; j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) { if (original_data[j] > 0) { if (current_pos.offset == compressed_data_batch_size) { hipHostMalloc((void **)&compressed_data[thread_num][current_pos.compressed_data_batch + 1], compressed_data_batch_size * sizeof(float)); compressed_data_taken[thread_num][current_pos.compressed_data_batch + 1] = true; current_pos.compressed_data_batch = current_pos.compressed_data_batch + 1; current_pos.offset = 0; } mask[mask_pos] = (mask[mask_pos] << 1) + 1; compressed_data[thread_num][current_pos.compressed_data_batch][current_pos.offset] = original_data[j]; 
current_pos.offset += 1; } else { mask[mask_pos] = (mask[mask_pos] << 1); } } } return NULL; } void *decompressThread(void *arg) { ThreadArg *thread_arg = (ThreadArg *)arg; float *original_data = thread_arg->original_data; float ***compressed_data = thread_arg->compressed_data; bool **compressed_data_taken = thread_arg->compressed_data_taken; unsigned int *mask = thread_arg->mask; int thread_num = thread_arg->thread_num; long num_elements = thread_arg->num_elements; long start = thread_num * num_elements / n_threads; long n_compression_batches = num_elements / n_threads / COMPRESSION_BATCH_SIZE; long compressed_data_batch_size = num_elements / n_threads / n_compressed_data_batches; // hipHostMalloc((void **)&compressed_data[thread_num], n_compressed_data_batches * sizeof(float *)); CompressedPos current_pos; current_pos.compressed_data_batch = 0, current_pos.offset = 0; for (long i = 0; i < n_compression_batches; i++) { long mask_pos = (i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE; for (long j = i * COMPRESSION_BATCH_SIZE + start; j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) { if (mask[mask_pos] & 0x80000000 > 0) { original_data[j] = compressed_data[thread_num][current_pos.compressed_data_batch][current_pos.offset]; current_pos.offset += 1; if (current_pos.offset == compressed_data_batch_size) { current_pos.compressed_data_batch += 1; current_pos.offset = 0; } } else { original_data[j] = 0; } mask[mask_pos] = mask[mask_pos] << 1; } } for (int i = 0; i < n_compressed_data_batches; i++) { if (compressed_data_taken[thread_num][i]) hipHostFree(compressed_data[thread_num][i]); else break; } hipHostFree(compressed_data_taken[thread_num]); hipHostFree(compressed_data[thread_num]); return NULL; } int main() { int batch_size = 64; long total_space = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); vector<float ***> compressed_data_vec; vector<unsigned int *> mask_vec; vector<bool **> compressed_data_taken_vec; pthread_t threads[n_threads]; for (int i = 0; i < num_layers; i++) { layer_sizes[i] *= batch_size; } vector<float> compression_times; float total_milli = 0.0; for (int j = 0; j < num_layers; j++) { if (!layer_compress[j]) continue; long num_elements = layer_sizes[j]; float *original_data, ***compressed_data; bool **compressed_data_taken; unsigned int *mask; hipHostMalloc((void **)&original_data, num_elements * sizeof(float)); // hipHostMalloc((void **)&compressed_data, num_elements * sizeof(float)); // generate data for (long i = 0; i < num_elements; i++) { if (rand() % 100 < layer_density[j]) original_data[i] = 1; else original_data[i] = 0; } if (num_elements % n_threads != 0) { cout << "bad number of threads" << endl; exit(0); } if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) { cout << "bad num_elements or n_threads" << endl; exit(0); } cout << "starting " << j << endl; hipEventRecord(start); hipHostMalloc((void **)&compressed_data, n_threads * sizeof(float **)); hipHostMalloc((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE * sizeof(unsigned int)); hipHostMalloc((void **)&compressed_data_taken, n_threads * sizeof(bool *)); ThreadArg thread_arg[n_threads]; for (int i = 0; i < n_threads; i++) { thread_arg[i].original_data = original_data; thread_arg[i].compressed_data = compressed_data; thread_arg[i].compressed_data_taken = compressed_data_taken; thread_arg[i].mask = mask; thread_arg[i].thread_num = i; thread_arg[i].num_elements = num_elements; } for (int i = 0; i < n_threads; i++) { pthread_create(&threads[i], NULL, 
&compressThread, (void *)&thread_arg[i]); } for (int i = 0; i < n_threads; i++) { pthread_join(threads[i], NULL); } compressed_data_vec.push_back(compressed_data); mask_vec.push_back(mask); compressed_data_taken_vec.push_back(compressed_data_taken); hipHostFree(original_data); // for (int i = 0; i < 27 * 27 * 256 * 128; i++); hipEventRecord(stop); hipEventSynchronize(stop); float milli; hipEventElapsedTime(&milli, start, stop); compression_times.push_back(milli); total_milli += milli; // cout << milli << endl; hipHostFree(original_data); // hipHostFree(compressed_data); // hipHostFree(mask); } for (int i = 0; i < compression_times.size(); i++) { cout << compression_times[i] << endl; } cout << total_milli << endl; // calculating space consumed int k = 0; for (int j = 0; j < num_layers; j++) { long num_elements = layer_sizes[j]; long cur_space = 0; if (!layer_compress[j]) { cur_space = num_elements * sizeof(float); total_space += cur_space; continue; } bool **compressed_data_taken = compressed_data_taken_vec[k]; long compressed_data_batch_size = num_elements / n_threads / n_compressed_data_batches; for (int thread_num = 0; thread_num < n_threads; thread_num++) { for (int i = 0; i < n_compressed_data_batches; i++) { if (compressed_data_taken[thread_num][i]) cur_space += compressed_data_batch_size; else break; } } // add size of mask cur_space += num_elements / COMPRESSION_BATCH_SIZE; cur_space *= sizeof(float); total_space += cur_space; k++; } cout << "total_space_compressed(MB): " << total_space * 1.0 / (1024 * 1024) << endl; // { // int n; // cout << "waiting..\n"; // cin >> n; // } // decompression cout << "decompress" << endl; vector<float> decompression_times; float total_milli_decompress = 0.0; for (int j = num_layers - 1; j >= 0; j--) { if (!layer_compress[j]) continue; long num_elements = layer_sizes[j]; float *original_data, ***compressed_data; bool **compressed_data_taken; unsigned int *mask; compressed_data = compressed_data_vec.back(); mask = mask_vec.back(); compressed_data_taken = compressed_data_taken_vec.back(); compressed_data_vec.pop_back(); mask_vec.pop_back(); compressed_data_taken_vec.pop_back(); // hipHostMalloc((void **)&compressed_data, num_elements * sizeof(float)); cout << "starting " << j << endl; hipEventRecord(start); hipHostMalloc((void **)&original_data, num_elements * sizeof(float)); ThreadArg thread_arg[n_threads]; for (int i = 0; i < n_threads; i++) { thread_arg[i].original_data = original_data; thread_arg[i].compressed_data = compressed_data; thread_arg[i].compressed_data_taken = compressed_data_taken; thread_arg[i].mask = mask; thread_arg[i].thread_num = i; thread_arg[i].num_elements = num_elements; } for (int i = 0; i < n_threads; i++) { pthread_create(&threads[i], NULL, &decompressThread, (void *)&thread_arg[i]); } for (int i = 0; i < n_threads; i++) { pthread_join(threads[i], NULL); } hipHostFree(compressed_data_taken); hipHostFree(compressed_data); hipHostFree(mask); // hipHostFree(original_data); // for (int i = 0; i < 27 * 27 * 256 * 128; i++); hipEventRecord(stop); hipEventSynchronize(stop); float milli; hipEventElapsedTime(&milli, start, stop); decompression_times.insert(decompression_times.begin(), milli); total_milli_decompress += milli; // cout << milli << endl; // hipHostFree(compressed_data); // hipHostFree(mask); } for (int i = 0; i < decompression_times.size(); i++) { cout << decompression_times[i] << endl; } cout << total_milli_decompress << endl; // calculating total space total_space = 0; for (int j = 0; j < num_layers; j++) { long 
num_elements = layer_sizes[j]; long cur_space = 0; cur_space = num_elements * sizeof(float); total_space += cur_space; } cout << "total space(MB): " << total_space * 1.0 / (1024 * 1024) << endl; }
848e206bcc8a99762662faa936171d9e1f25b5d8.cu
#include <iostream> #include <pthread.h> #include <cstdlib> #include <vector> #define COMPRESSION_BATCH_SIZE 32 using namespace std; struct ThreadArg { float *original_data; long num_elements; int thread_num; float ***compressed_data; bool **compressed_data_taken; unsigned int *mask; }; struct CompressedPos { long compressed_data_batch; long offset; }; int n_threads = 8; int n_compressed_data_batches = 8; long layer_sizes_alexnet[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256, 13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384, 13l * 13 * 256, 6l * 6 * 256}; bool layer_compress_alexnet[] = {true, true, true, true, true, true, true, true}; long layer_density_alexnet[] = {50, 80, 40, 60, 70, 70, 30, 60}; int num_layers_alexnet = 8; long layer_sizes_vgg[] = {224l * 224 * 64, 224l * 224 * 64, 112l * 112 * 64, 112l * 112 * 128, 112l * 112 * 128, 56l * 56 * 128, 56l * 56 * 256, 56l * 56 * 256, 56l * 56 * 256, 28l * 28 * 256, 28l * 28 * 512, 28l * 28 * 512, 28l * 28 * 512, 14l * 14 * 512, 14l * 14 * 512, 14l * 14 * 512, 14l * 14 * 512, 7l * 7 * 512}; long layer_density_vgg[] = {50, 20, 30, 20, 10, 20, 20, 20, 10, 20, 20, 10, 10, 10, 20, 20, 10, 15 }; bool layer_compress_vgg[] = {true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true}; int num_layers_vgg = 18; // long *layer_sizes = layer_sizes_alexnet; // bool *layer_compress = layer_compress_alexnet; // long *layer_density = layer_density_alexnet; // int num_layers = num_layers_alexnet; long *layer_sizes = layer_sizes_alexnet; bool *layer_compress = layer_compress_alexnet; long *layer_density = layer_density_alexnet; int num_layers = num_layers_alexnet; void *compressThread(void *arg) { ThreadArg *thread_arg = (ThreadArg *)arg; float *original_data = thread_arg->original_data; float ***compressed_data = thread_arg->compressed_data; bool **compressed_data_taken = thread_arg->compressed_data_taken; unsigned int *mask = thread_arg->mask; int thread_num = thread_arg->thread_num; long num_elements = thread_arg->num_elements; long start = thread_num * num_elements / n_threads; long n_compression_batches = num_elements / n_threads / COMPRESSION_BATCH_SIZE; long compressed_data_batch_size = num_elements / n_threads / n_compressed_data_batches; cudaMallocHost((void **)&compressed_data[thread_num], n_compressed_data_batches * sizeof(float *)); cudaMallocHost((void **)&compressed_data_taken[thread_num], n_compressed_data_batches * sizeof(bool)); for (int i = 0; i < n_compressed_data_batches; i++) { compressed_data_taken[thread_num][i] = false; } CompressedPos current_pos; current_pos.compressed_data_batch = -1, current_pos.offset = compressed_data_batch_size; for (long i = 0; i < n_compression_batches; i++) { long mask_pos = (i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE; mask[mask_pos] = 0; for (long j = i * COMPRESSION_BATCH_SIZE + start; j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) { if (original_data[j] > 0) { if (current_pos.offset == compressed_data_batch_size) { cudaMallocHost((void **)&compressed_data[thread_num][current_pos.compressed_data_batch + 1], compressed_data_batch_size * sizeof(float)); compressed_data_taken[thread_num][current_pos.compressed_data_batch + 1] = true; current_pos.compressed_data_batch = current_pos.compressed_data_batch + 1; current_pos.offset = 0; } mask[mask_pos] = (mask[mask_pos] << 1) + 1; compressed_data[thread_num][current_pos.compressed_data_batch][current_pos.offset] = original_data[j]; current_pos.offset += 1; } else { mask[mask_pos] = 
(mask[mask_pos] << 1); } } } return NULL; } void *decompressThread(void *arg) { ThreadArg *thread_arg = (ThreadArg *)arg; float *original_data = thread_arg->original_data; float ***compressed_data = thread_arg->compressed_data; bool **compressed_data_taken = thread_arg->compressed_data_taken; unsigned int *mask = thread_arg->mask; int thread_num = thread_arg->thread_num; long num_elements = thread_arg->num_elements; long start = thread_num * num_elements / n_threads; long n_compression_batches = num_elements / n_threads / COMPRESSION_BATCH_SIZE; long compressed_data_batch_size = num_elements / n_threads / n_compressed_data_batches; // cudaMallocHost((void **)&compressed_data[thread_num], n_compressed_data_batches * sizeof(float *)); CompressedPos current_pos; current_pos.compressed_data_batch = 0, current_pos.offset = 0; for (long i = 0; i < n_compression_batches; i++) { long mask_pos = (i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE; for (long j = i * COMPRESSION_BATCH_SIZE + start; j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) { if (mask[mask_pos] & 0x80000000 > 0) { original_data[j] = compressed_data[thread_num][current_pos.compressed_data_batch][current_pos.offset]; current_pos.offset += 1; if (current_pos.offset == compressed_data_batch_size) { current_pos.compressed_data_batch += 1; current_pos.offset = 0; } } else { original_data[j] = 0; } mask[mask_pos] = mask[mask_pos] << 1; } } for (int i = 0; i < n_compressed_data_batches; i++) { if (compressed_data_taken[thread_num][i]) cudaFreeHost(compressed_data[thread_num][i]); else break; } cudaFreeHost(compressed_data_taken[thread_num]); cudaFreeHost(compressed_data[thread_num]); return NULL; } int main() { int batch_size = 64; long total_space = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); vector<float ***> compressed_data_vec; vector<unsigned int *> mask_vec; vector<bool **> compressed_data_taken_vec; pthread_t threads[n_threads]; for (int i = 0; i < num_layers; i++) { layer_sizes[i] *= batch_size; } vector<float> compression_times; float total_milli = 0.0; for (int j = 0; j < num_layers; j++) { if (!layer_compress[j]) continue; long num_elements = layer_sizes[j]; float *original_data, ***compressed_data; bool **compressed_data_taken; unsigned int *mask; cudaMallocHost((void **)&original_data, num_elements * sizeof(float)); // cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float)); // generate data for (long i = 0; i < num_elements; i++) { if (rand() % 100 < layer_density[j]) original_data[i] = 1; else original_data[i] = 0; } if (num_elements % n_threads != 0) { cout << "bad number of threads" << endl; exit(0); } if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) { cout << "bad num_elements or n_threads" << endl; exit(0); } cout << "starting " << j << endl; cudaEventRecord(start); cudaMallocHost((void **)&compressed_data, n_threads * sizeof(float **)); cudaMallocHost((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE * sizeof(unsigned int)); cudaMallocHost((void **)&compressed_data_taken, n_threads * sizeof(bool *)); ThreadArg thread_arg[n_threads]; for (int i = 0; i < n_threads; i++) { thread_arg[i].original_data = original_data; thread_arg[i].compressed_data = compressed_data; thread_arg[i].compressed_data_taken = compressed_data_taken; thread_arg[i].mask = mask; thread_arg[i].thread_num = i; thread_arg[i].num_elements = num_elements; } for (int i = 0; i < n_threads; i++) { pthread_create(&threads[i], NULL, &compressThread, (void *)&thread_arg[i]); } for 
(int i = 0; i < n_threads; i++) { pthread_join(threads[i], NULL); } compressed_data_vec.push_back(compressed_data); mask_vec.push_back(mask); compressed_data_taken_vec.push_back(compressed_data_taken); cudaFreeHost(original_data); // for (int i = 0; i < 27 * 27 * 256 * 128; i++); cudaEventRecord(stop); cudaEventSynchronize(stop); float milli; cudaEventElapsedTime(&milli, start, stop); compression_times.push_back(milli); total_milli += milli; // cout << milli << endl; cudaFreeHost(original_data); // cudaFreeHost(compressed_data); // cudaFreeHost(mask); } for (int i = 0; i < compression_times.size(); i++) { cout << compression_times[i] << endl; } cout << total_milli << endl; // calculating space consumed int k = 0; for (int j = 0; j < num_layers; j++) { long num_elements = layer_sizes[j]; long cur_space = 0; if (!layer_compress[j]) { cur_space = num_elements * sizeof(float); total_space += cur_space; continue; } bool **compressed_data_taken = compressed_data_taken_vec[k]; long compressed_data_batch_size = num_elements / n_threads / n_compressed_data_batches; for (int thread_num = 0; thread_num < n_threads; thread_num++) { for (int i = 0; i < n_compressed_data_batches; i++) { if (compressed_data_taken[thread_num][i]) cur_space += compressed_data_batch_size; else break; } } // add size of mask cur_space += num_elements / COMPRESSION_BATCH_SIZE; cur_space *= sizeof(float); total_space += cur_space; k++; } cout << "total_space_compressed(MB): " << total_space * 1.0 / (1024 * 1024) << endl; // { // int n; // cout << "waiting..\n"; // cin >> n; // } // decompression cout << "decompress" << endl; vector<float> decompression_times; float total_milli_decompress = 0.0; for (int j = num_layers - 1; j >= 0; j--) { if (!layer_compress[j]) continue; long num_elements = layer_sizes[j]; float *original_data, ***compressed_data; bool **compressed_data_taken; unsigned int *mask; compressed_data = compressed_data_vec.back(); mask = mask_vec.back(); compressed_data_taken = compressed_data_taken_vec.back(); compressed_data_vec.pop_back(); mask_vec.pop_back(); compressed_data_taken_vec.pop_back(); // cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float)); cout << "starting " << j << endl; cudaEventRecord(start); cudaMallocHost((void **)&original_data, num_elements * sizeof(float)); ThreadArg thread_arg[n_threads]; for (int i = 0; i < n_threads; i++) { thread_arg[i].original_data = original_data; thread_arg[i].compressed_data = compressed_data; thread_arg[i].compressed_data_taken = compressed_data_taken; thread_arg[i].mask = mask; thread_arg[i].thread_num = i; thread_arg[i].num_elements = num_elements; } for (int i = 0; i < n_threads; i++) { pthread_create(&threads[i], NULL, &decompressThread, (void *)&thread_arg[i]); } for (int i = 0; i < n_threads; i++) { pthread_join(threads[i], NULL); } cudaFreeHost(compressed_data_taken); cudaFreeHost(compressed_data); cudaFreeHost(mask); // cudaFreeHost(original_data); // for (int i = 0; i < 27 * 27 * 256 * 128; i++); cudaEventRecord(stop); cudaEventSynchronize(stop); float milli; cudaEventElapsedTime(&milli, start, stop); decompression_times.insert(decompression_times.begin(), milli); total_milli_decompress += milli; // cout << milli << endl; // cudaFreeHost(compressed_data); // cudaFreeHost(mask); } for (int i = 0; i < decompression_times.size(); i++) { cout << decompression_times[i] << endl; } cout << total_milli_decompress << endl; // calculating total space total_space = 0; for (int j = 0; j < num_layers; j++) { long num_elements = layer_sizes[j]; long 
cur_space = 0; cur_space = num_elements * sizeof(float); total_space += cur_space; } cout << "total space(MB): " << total_space * 1.0 / (1024 * 1024) << endl; }
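// A note on decompressThread in the pair above: the bit test
//     if (mask[mask_pos] & 0x80000000 > 0)
// parses as mask[mask_pos] & (0x80000000 > 0) because '>' binds tighter than '&',
// so it tests bit 0 rather than bit 31, while compressThread fills the mask from the
// most-significant side. (The compression loop also calls cudaFreeHost(original_data)
// twice per layer.) A small self-contained check of the parenthesised test:

#include <cstdio>

int main()
{
    unsigned int mask = 0x80000000u;                            // only the top bit set
    printf("%u\n", (unsigned int)(mask & 0x80000000 > 0));      // prints 0 - tests the wrong bit
    printf("%u\n", (unsigned int)((mask & 0x80000000u) != 0));  // prints 1 - intended test
    return 0;
}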
d44f91c538c9a0385003fe8f1c76e07f7174e3a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__host__ void matrix_multiply_CPU(int *h_p, int *h_q, int *h_m, int width)
{
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            for (int k = 0; k < width; k++)
            {
                int tmp = h_p[i*width+k] * h_q[k*width+j];
                h_m[i*width+j] += tmp;
            }
}

// __global__ runs on the GPU & can be called from host
// __global__ must return void
__global__ void matrix_multiply_GPU(int *d_p, int *d_q, int *d_m, int width)
{
    // int row = threadIdx.y + blockIdx.y * blockDim.y;
    // int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y;
    int col = threadIdx.x;

    for (int k = 0; k < width; k++)
    {
        int tmp = d_p[row*width+k] * d_q[k*width+col];
        d_m[row*width+col] += tmp;
    }
}

int main()
{
    int *h_p, *h_q, *h_m;
    const int width = 5;

    h_p = (int*)malloc(sizeof(int) * width * width);
    h_q = (int*)malloc(sizeof(int) * width * width);
    h_m = (int*)malloc(sizeof(int) * width * width);

    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            h_p[i*width+j] = i*width + j;

    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            h_q[i*width+j] = i*width + j;

    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            h_m[i*width+j] = 0;

    matrix_multiply_CPU(h_p, h_q, h_m, width);

    printf("The result of matrix_multiply_CPU:\n");
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
        {
            printf("%d", h_m[i*width+j]);
            printf((j != width-1) ? "\t" : "\n");
        }
    printf("---------------------\n");

    // declare GPU memory pointers
    int *d_p, *d_q, *d_m;

    // allocate GPU memory
    hipMalloc((void **)&d_p, sizeof(int) * width * width);
    hipMalloc((void **)&d_q, sizeof(int) * width * width);
    hipMalloc((void **)&d_m, sizeof(int) * width * width);

    // zero out GPU memory
    hipMemset((void *)d_m, 0, sizeof(int) * width * width);

    // transfer the matrix to the GPU
    hipMemcpy((void *)d_p, (void *)h_p, sizeof(int) * width * width, hipMemcpyHostToDevice);
    hipMemcpy((void *)d_q, (void *)h_q, sizeof(int) * width * width, hipMemcpyHostToDevice);

    //
    const dim3 cat(1, 1);
    const dim3 dog(width, width, 1);

    // launch the kernel
    hipLaunchKernelGGL(( matrix_multiply_GPU), dim3(cat), dim3(dog), 0, 0, d_p, d_q, d_m, width);

    // copy back the matrix from GPU to the CPU
    hipMemcpy((void *)h_m, (void *)d_m, sizeof(int) * width * width, hipMemcpyDeviceToHost);

    // print the matrix
    printf("The result of matrix_multiply_GPU:\n");
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
        {
            printf("%d", h_m[i*width+j]);
            printf((j != width-1) ? "\t" : "\n");
        }

    // free memory allocation
    free(h_p);
    free(h_q);
    free(h_m);
    hipFree(d_p);
    hipFree(d_q);
    hipFree(d_m);

    return 0;
}
d44f91c538c9a0385003fe8f1c76e07f7174e3a4.cu
#include <stdio.h>

__host__ void matrix_multiply_CPU(int *h_p, int *h_q, int *h_m, int width)
{
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            for (int k = 0; k < width; k++)
            {
                int tmp = h_p[i*width+k] * h_q[k*width+j];
                h_m[i*width+j] += tmp;
            }
}

// __global__ runs on the GPU & can be called from host
// __global__ must return void
__global__ void matrix_multiply_GPU(int *d_p, int *d_q, int *d_m, int width)
{
    // int row = threadIdx.y + blockIdx.y * blockDim.y;
    // int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y;
    int col = threadIdx.x;

    for (int k = 0; k < width; k++)
    {
        int tmp = d_p[row*width+k] * d_q[k*width+col];
        d_m[row*width+col] += tmp;
    }
}

int main()
{
    int *h_p, *h_q, *h_m;
    const int width = 5;

    h_p = (int*)malloc(sizeof(int) * width * width);
    h_q = (int*)malloc(sizeof(int) * width * width);
    h_m = (int*)malloc(sizeof(int) * width * width);

    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            h_p[i*width+j] = i*width + j;

    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            h_q[i*width+j] = i*width + j;

    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            h_m[i*width+j] = 0;

    matrix_multiply_CPU(h_p, h_q, h_m, width);

    printf("The result of matrix_multiply_CPU:\n");
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
        {
            printf("%d", h_m[i*width+j]);
            printf((j != width-1) ? "\t" : "\n");
        }
    printf("---------------------\n");

    // declare GPU memory pointers
    int *d_p, *d_q, *d_m;

    // allocate GPU memory
    cudaMalloc((void **)&d_p, sizeof(int) * width * width);
    cudaMalloc((void **)&d_q, sizeof(int) * width * width);
    cudaMalloc((void **)&d_m, sizeof(int) * width * width);

    // zero out GPU memory
    cudaMemset((void *)d_m, 0, sizeof(int) * width * width);

    // transfer the matrix to the GPU
    cudaMemcpy((void *)d_p, (void *)h_p, sizeof(int) * width * width, cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_q, (void *)h_q, sizeof(int) * width * width, cudaMemcpyHostToDevice);

    //
    const dim3 cat(1, 1);
    const dim3 dog(width, width, 1);

    // launch the kernel
    matrix_multiply_GPU<<<cat, dog>>>(d_p, d_q, d_m, width);

    // copy back the matrix from GPU to the CPU
    cudaMemcpy((void *)h_m, (void *)d_m, sizeof(int) * width * width, cudaMemcpyDeviceToHost);

    // print the matrix
    printf("The result of matrix_multiply_GPU:\n");
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
        {
            printf("%d", h_m[i*width+j]);
            printf((j != width-1) ? "\t" : "\n");
        }

    // free memory allocation
    free(h_p);
    free(h_q);
    free(h_m);
    cudaFree(d_p);
    cudaFree(d_q);
    cudaFree(d_m);

    return 0;
}
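// A note on the pair above: the kernel is launched with a single block (cat = {1, 1})
// and indexes only by threadIdx, so width is limited by the maximum block size
// (e.g. 32 x 32 = 1024 threads). A minimal multi-block variant along the lines of the
// commented-out global indexing inside matrix_multiply_GPU; the kernel name, the
// 16 x 16 block and the bounds check are additions for illustration, not part of the
// original pair.

__global__ void matrix_multiply_GPU_grid(const int *p, const int *q, int *m, int width)
{
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    if (row >= width || col >= width) return;   // guard the partial blocks at the edges

    int acc = 0;
    for (int k = 0; k < width; k++)
        acc += p[row * width + k] * q[k * width + col];
    m[row * width + col] = acc;                 // single write, no zero-initialisation needed
}

// Possible launch, keeping the d_p/d_q/d_m buffers from main:
//   dim3 block(16, 16);
//   dim3 grid((width + block.x - 1) / block.x, (width + block.y - 1) / block.y);
//   matrix_multiply_GPU_grid<<<grid, block>>>(d_p, d_q, d_m, width);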
e7d0842553ed948d24a492a3e1c6876bb2af623a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { #ifdef USE_ROCM __device__ int_tp compute_uncropped_index( int_tp index, const int_tp ndims, const int_tp* src_strides, const int_tp* dst_strides, const int_tp* offsets) { int_tp dst_index = index; int_tp src_index = 0; for (int_tp i = 0; i < ndims; ++i) { int_tp coord = dst_index / dst_strides[i]; dst_index -= coord * dst_strides[i]; src_index += src_strides[i] * (coord + offsets[i]); } return src_index; } template <typename Dtype> __global__ void crop_kernel_forward(const int_tp nthreads, const int_tp ndims, const int_tp* src_strides, const int_tp* dst_strides, const int_tp* offsets, const Dtype* src, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { int_tp src_index = compute_uncropped_index( index, ndims, src_strides, dst_strides, offsets); dst[index] = src[src_index]; } } template <typename Dtype> __global__ void crop_kernel_backward(const int_tp nthreads, const int_tp ndims, const int_tp* src_strides, const int_tp* dst_strides, const int_tp* offsets, Dtype* src, const Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { int_tp src_index = compute_uncropped_index( index, ndims, src_strides, dst_strides, offsets); src[src_index] = dst[index]; } } #endif // USE_ROCM template<typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int_tp n = top[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) crop_kernel_forward CUDA_KERNEL(CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS)(n, bottom[0]->num_axes(), src_strides_.gpu_data(), dst_strides_.gpu_data(), offsets.gpu_data(), bottom_data, top_data); #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); viennacl::ocl::kernel &oclk_crop_forward = program.get_kernel( CL_KERNEL_SELECT("crop_forward")); viennacl::ocl::enqueue( oclk_crop_forward(n, bottom[0]->num_axes(), WrapHandle((cl_mem)(src_strides_.gpu_data()), &ctx), WrapHandle((cl_mem)(dst_strides_.gpu_data()), &ctx), WrapHandle((cl_mem)(offsets.gpu_data()), &ctx), WrapHandle((cl_mem)(bottom_data), &ctx), 0, WrapHandle((cl_mem)(top_data), &ctx), 0), ctx.get_queue()); #endif // USE_GREENTEA } } template<typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int_tp n = top[0]->count(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) crop_kernel_backward CUDA_KERNEL(CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS)(n, bottom[0]->num_axes(), src_strides_.gpu_data(), dst_strides_.gpu_data(), offsets.gpu_data(), bottom_diff, top_diff); } #endif // USE_ROCM } else { #ifdef USE_GREENTEA if (propagate_down[0]) { viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); greentea_gpu_set<Dtype>(this->device_->id(), bottom[0]->count(), 0, (cl_mem)bottom_diff, 0); 
viennacl::ocl::kernel &oclk_crop_backward = program.get_kernel( CL_KERNEL_SELECT("crop_backward")); viennacl::ocl::enqueue( oclk_crop_backward(n, bottom[0]->num_axes(), WrapHandle((cl_mem)(src_strides_.gpu_data()), &ctx), WrapHandle((cl_mem)(dst_strides_.gpu_data()), &ctx), WrapHandle((cl_mem)(offsets.gpu_data()), &ctx), WrapHandle((cl_mem)(bottom_diff), &ctx), 0, WrapHandle((cl_mem)(top_diff), &ctx), 0), ctx.get_queue()); } #endif // USE_GREENTEA } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
e7d0842553ed948d24a492a3e1c6876bb2af623a.cu
#include <vector>

#include "caffe/layers/crop_layer.hpp"

namespace caffe {

#ifdef USE_CUDA
__device__ int_tp compute_uncropped_index(
    int_tp index,
    const int_tp ndims,
    const int_tp* src_strides,
    const int_tp* dst_strides,
    const int_tp* offsets) {
  int_tp dst_index = index;
  int_tp src_index = 0;
  for (int_tp i = 0; i < ndims; ++i) {
    int_tp coord = dst_index / dst_strides[i];
    dst_index -= coord * dst_strides[i];
    src_index += src_strides[i] * (coord + offsets[i]);
  }
  return src_index;
}

template <typename Dtype>
__global__ void crop_kernel_forward(const int_tp nthreads,
    const int_tp ndims,
    const int_tp* src_strides,
    const int_tp* dst_strides,
    const int_tp* offsets,
    const Dtype* src, Dtype* dst) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int_tp src_index = compute_uncropped_index(
        index, ndims, src_strides, dst_strides, offsets);
    dst[index] = src[src_index];
  }
}

template <typename Dtype>
__global__ void crop_kernel_backward(const int_tp nthreads,
    const int_tp ndims,
    const int_tp* src_strides,
    const int_tp* dst_strides,
    const int_tp* offsets,
    Dtype* src, const Dtype* dst) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int_tp src_index = compute_uncropped_index(
        index, ndims, src_strides, dst_strides, offsets);
    src[src_index] = dst[index];
  }
}
#endif  // USE_CUDA

template<typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int_tp n = top[0]->count();

  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    // NOLINT_NEXT_LINE(whitespace/operators)
    crop_kernel_forward CUDA_KERNEL(CAFFE_GET_BLOCKS(n),
                                    CAFFE_CUDA_NUM_THREADS)(n,
        bottom[0]->num_axes(), src_strides_.gpu_data(),
        dst_strides_.gpu_data(), offsets.gpu_data(),
        bottom_data, top_data);
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    viennacl::ocl::context &ctx = viennacl::ocl::get_context(
        this->device_->id());
    viennacl::ocl::program &program = this->device_->program();
    viennacl::ocl::kernel &oclk_crop_forward = program.get_kernel(
        CL_KERNEL_SELECT("crop_forward"));
    viennacl::ocl::enqueue(
        oclk_crop_forward(n, bottom[0]->num_axes(),
            WrapHandle((cl_mem)(src_strides_.gpu_data()), &ctx),
            WrapHandle((cl_mem)(dst_strides_.gpu_data()), &ctx),
            WrapHandle((cl_mem)(offsets.gpu_data()), &ctx),
            WrapHandle((cl_mem)(bottom_data), &ctx), 0,
            WrapHandle((cl_mem)(top_data), &ctx), 0),
        ctx.get_queue());
#endif  // USE_GREENTEA
  }
}

template<typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  int_tp n = top[0]->count();

  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    if (propagate_down[0]) {
      caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
      // NOLINT_NEXT_LINE(whitespace/operators)
      crop_kernel_backward CUDA_KERNEL(CAFFE_GET_BLOCKS(n),
                                       CAFFE_CUDA_NUM_THREADS)(n,
          bottom[0]->num_axes(), src_strides_.gpu_data(),
          dst_strides_.gpu_data(), offsets.gpu_data(),
          bottom_diff, top_diff);
    }
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    if (propagate_down[0]) {
      viennacl::ocl::context &ctx = viennacl::ocl::get_context(
          this->device_->id());
      viennacl::ocl::program &program = this->device_->program();
      greentea_gpu_set<Dtype>(this->device_->id(), bottom[0]->count(), 0,
                              (cl_mem)bottom_diff, 0);
      viennacl::ocl::kernel &oclk_crop_backward = program.get_kernel(
          CL_KERNEL_SELECT("crop_backward"));
      viennacl::ocl::enqueue(
          oclk_crop_backward(n, bottom[0]->num_axes(),
              WrapHandle((cl_mem)(src_strides_.gpu_data()), &ctx),
              WrapHandle((cl_mem)(dst_strides_.gpu_data()), &ctx),
              WrapHandle((cl_mem)(offsets.gpu_data()), &ctx),
              WrapHandle((cl_mem)(bottom_diff), &ctx), 0,
              WrapHandle((cl_mem)(top_diff), &ctx), 0),
          ctx.get_queue());
    }
#endif  // USE_GREENTEA
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);

}  // namespace caffe
ceefcd20e019e245bc014505c12784f27b4cec34.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define SRC_SIZE 65536
#define DST_SIZE 65536
#define CPY_SIZE 65536

__global__ void bad_memcpy_kernel()
{
    int* devMemSrc = (int*)malloc(SRC_SIZE*sizeof(int));
    int* devMemDest = (int*)malloc(DST_SIZE*sizeof(int));

    memcpy(devMemDest, devMemSrc, CPY_SIZE*sizeof(int));

    free(devMemDest);
    free(devMemSrc);
}

int main()
{
    hipThreadSetLimit(hipLimitMallocHeapSize, 4*CPY_SIZE*sizeof(int));

    hipLaunchKernelGGL(( bad_memcpy_kernel), dim3(1), dim3(1), 0, 0, );

    hipDeviceReset();
    return 0;
}
ceefcd20e019e245bc014505c12784f27b4cec34.cu
#include <stdio.h>

#define SRC_SIZE 65536
#define DST_SIZE 65536
#define CPY_SIZE 65536

__global__ void bad_memcpy_kernel()
{
    int* devMemSrc = (int*)malloc(SRC_SIZE*sizeof(int));
    int* devMemDest = (int*)malloc(DST_SIZE*sizeof(int));

    memcpy(devMemDest, devMemSrc, CPY_SIZE*sizeof(int));

    free(devMemDest);
    free(devMemSrc);
}

int main()
{
    cudaThreadSetLimit(cudaLimitMallocHeapSize, 4*CPY_SIZE*sizeof(int));

    bad_memcpy_kernel<<<1, 1>>>();

    cudaDeviceReset();
    return 0;
}
a0b2af9e531d59ece62bf1efa7ce5ca46c3d66c2.hip
// !!! This is a file automatically generated by hipify!!! //----------------------------------------------------------------------------- //! Copyright (c) 2014-2015, Benjamin Worpitz //! All rights reserved. //! //! Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met : //! * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. //! * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. //! * Neither the name of the TU Dresden nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. //! //! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. //! IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) //! HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //----------------------------------------------------------------------------- #if defined(MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE) || defined(MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE) || defined(MATMUL_BUILD_PAR_CUDA_DYN_BLOCK_SIZE) || defined(MATMUL_BUILD_PAR_CUDA_MEMCPY_DYN_BLOCK_SIZE) #include <matmul/par/Cuda.h> #include <matmul/common/Cuda.h> // matmul_gemm_wrap_memcpy_host_cuda_2d #include <matmul/common/Mat.h> // matmul_mat_gemm_early_out #include <hip/hip_runtime.h> #include <stdio.h> // printf #include <math.h> // ceil #include <algorithm> // std::min #define MATMUL_CUDA_RT_CHECK(cmd) {hipError_t error = cmd; if(error!=hipSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", hipGetErrorString(error));}} #ifdef MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_fixed_block_size_2d_static_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. 
TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. __shared__ TElem pBlockSharedA[MATMUL_CUDA_FIXED_BLOCK_SIZE][MATMUL_CUDA_FIXED_BLOCK_SIZE]; __shared__ TElem pBlockSharedB[MATMUL_CUDA_FIXED_BLOCK_SIZE][MATMUL_CUDA_FIXED_BLOCK_SIZE]; // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. auto const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k)/static_cast<float>(uiBlockThreadsExtent)))); for(TIdx k2=0; k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiBlockThreadIdxY][uiBlockThreadIdxX] = ((!bInsideA) || (uiAIdxX>=k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiBlockThreadIdxY][uiBlockThreadIdxX] = ((!bInsideB) || (uiBIdxY>=k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for(TIdx k3 = 0; k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY][k3] * pBlockSharedB[k3][uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. 
__syncthreads(); } if(bInsideC) { auto const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_fixed_block_size_2d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } dim3 const dimBlock(MATMUL_CUDA_FIXED_BLOCK_SIZE, MATMUL_CUDA_FIXED_BLOCK_SIZE); float const fGridThreadExtentX = ceil(((float)n) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); float const fGridThreadExtentY = ceil(((float)m) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); unsigned int const uiGridThreadExtentX = (unsigned int)fGridThreadExtentX; unsigned int const uiGridThreadExtentY = (unsigned int)fGridThreadExtentY; dim3 const dimGrid(uiGridThreadExtentX, uiGridThreadExtentY); hipLaunchKernelGGL(( matmul_gemm_par_cuda_fixed_block_size_2d_static_shared_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(hipDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_fixed_block_size_2d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_fixed_block_size_2d_static_shared); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_fixed_block_size_1d_static_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. 
__shared__ TElem pBlockSharedA[MATMUL_CUDA_FIXED_BLOCK_SIZE*MATMUL_CUDA_FIXED_BLOCK_SIZE]; __shared__ TElem pBlockSharedB[MATMUL_CUDA_FIXED_BLOCK_SIZE*MATMUL_CUDA_FIXED_BLOCK_SIZE]; auto const uiSharedBlockIdx1d(uiBlockThreadIdxY*uiBlockThreadsExtentX + uiBlockThreadIdxX); // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. auto const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k)/static_cast<float>(uiBlockThreadsExtent)))); for(TIdx k2=0; k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiSharedBlockIdx1d] = ((!bInsideA) || (uiAIdxX>=k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiSharedBlockIdx1d] = ((!bInsideB) || (uiBIdxY>=k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for(TIdx k3 = 0; k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY*uiBlockThreadsExtentX + k3] * pBlockSharedB[k3*uiBlockThreadsExtentY + uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. __syncthreads(); } if(bInsideC) { auto const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_fixed_block_size_1d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } dim3 const dimBlock(MATMUL_CUDA_FIXED_BLOCK_SIZE, MATMUL_CUDA_FIXED_BLOCK_SIZE); float const fGridThreadExtentX = ceil(((float)n) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); float const fGridThreadExtentY = ceil(((float)m) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); unsigned int const uiGridThreadExtentX = (unsigned int)fGridThreadExtentX; unsigned int const uiGridThreadExtentY = (unsigned int)fGridThreadExtentY; dim3 const dimGrid(uiGridThreadExtentX, uiGridThreadExtentY); hipLaunchKernelGGL(( matmul_gemm_par_cuda_fixed_block_size_1d_static_shared_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(hipDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_fixed_block_size_1d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const 
beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_fixed_block_size_1d_static_shared); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. extern __shared__ TElem pBlockSharedA[]; auto * const pBlockSharedB(pBlockSharedA + uiBlockThreadsExtentX*uiBlockThreadsExtentY); auto const uiSharedBlockIdx1d(uiBlockThreadIdxY*uiBlockThreadsExtentX + uiBlockThreadIdxX); // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. auto const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k)/static_cast<float>(uiBlockThreadsExtent)))); for(TIdx k2=0; k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiSharedBlockIdx1d] = ((!bInsideA) || (uiAIdxX>=k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiSharedBlockIdx1d] = ((!bInsideB) || (uiBIdxY>=k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for(TIdx k3 = 0; k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY*uiBlockThreadsExtentX + k3] * pBlockSharedB[k3*uiBlockThreadsExtentY + uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. 
__syncthreads(); } if(bInsideC) { auto const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } dim3 const dimBlock(MATMUL_CUDA_FIXED_BLOCK_SIZE, MATMUL_CUDA_FIXED_BLOCK_SIZE); float const fGridThreadExtentX = ceil(((float)n) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); float const fGridThreadExtentY = ceil(((float)m) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); unsigned int const uiGridThreadExtentX = (unsigned int)fGridThreadExtentX; unsigned int const uiGridThreadExtentY = (unsigned int)fGridThreadExtentY; dim3 const dimGrid(uiGridThreadExtentX, uiGridThreadExtentY); hipLaunchKernelGGL(( matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared_kernel), dim3(dimGrid), dim3(dimBlock), 2u*sizeof(TElem)*MATMUL_CUDA_FIXED_BLOCK_SIZE*MATMUL_CUDA_FIXED_BLOCK_SIZE, 0, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(hipDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_fixed_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_DYN_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. 
extern __shared__ TElem pBlockSharedA[]; TElem * const pBlockSharedB(pBlockSharedA + uiBlockThreadsExtentX*uiBlockThreadsExtentY); TIdx const uiSharedBlockIdx1d(uiBlockThreadIdxY*uiBlockThreadsExtentX + uiBlockThreadIdxX); // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. TIdx const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k) / static_cast<float>(uiBlockThreadsExtent)))); for (TIdx k2(0); k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiSharedBlockIdx1d] = ((!bInsideA) || (uiAIdxX >= k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiSharedBlockIdx1d] = ((!bInsideB) || (uiBIdxY >= k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for (TIdx k3(0); k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY*uiBlockThreadsExtentX + k3] * pBlockSharedB[k3*uiBlockThreadsExtentY + uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. __syncthreads(); } if (bInsideC) { TIdx const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } MATMUL_CUDA_RT_CHECK(hipSetDevice(0)); hipStream_t stream; MATMUL_CUDA_RT_CHECK(hipStreamCreate(&stream)); // Get its properties. hipDeviceProp_t cudaDevProp; MATMUL_CUDA_RT_CHECK(hipGetDeviceProperties( &cudaDevProp, 0)); TIdx vuiGridThreadExtents[] = { m, n }; TIdx vuiBlockThreadExtents[] = { cudaDevProp.maxThreadsDim[0], cudaDevProp.maxThreadsDim[1] }; // Restrict the max block thread extents with the grid thread extents. // This removes dimensions not required in the given grid thread extents. // This has to be done before the uiMaxBlockThreadsCount clipping to get the maximum correctly. for (TIdx i(0); i<2; ++i) { vuiBlockThreadExtents[i] = ::min(vuiBlockThreadExtents[i], vuiGridThreadExtents[i]); } // Restrict it to its minimum component. // For example (512, 256) will get (256, 256). auto uiMinBlockThreadExtent(vuiBlockThreadExtents[0]); for (TIdx i(1); i<2; ++i) { uiMinBlockThreadExtent = ::min(uiMinBlockThreadExtent, vuiBlockThreadExtents[i]); } for (TIdx i(0); i<2; ++i) { vuiBlockThreadExtents[i] = uiMinBlockThreadExtent; } // Adjust vuiBlockThreadExtents if its product is too large. 
if ((vuiBlockThreadExtents[0] * vuiBlockThreadExtents[1]) > cudaDevProp.maxThreadsPerBlock) { // Satisfy the following equation: // udaDevProp.maxThreadsPerBlock >= vuiBlockThreadExtents[0]*vuiBlockThreadExtents[1] // For example 1024 >= 512 * 512 // For equal block thread extent this is easily the nth root of cudaDevProp.maxThreadsPerBlock. double const fNthRoot(::pow(cudaDevProp.maxThreadsPerBlock, 1.0 / 2.0)); auto const uiNthRoot(static_cast<TIdx>(fNthRoot)); for (TIdx i(0); i<2; ++i) { vuiBlockThreadExtents[i] = uiNthRoot; } } // Set the grid block extents (rounded to the next integer not less then the quotient. TIdx vuiGridBlockExtents[] = { 1, 1 }; for (TIdx i(0); i<2; ++i) { vuiGridBlockExtents[i] = static_cast<TIdx>( ::ceil(static_cast<double>(vuiGridThreadExtents[i]) / static_cast<double>(vuiBlockThreadExtents[i]))); } dim3 const dimBlock(vuiBlockThreadExtents[0], vuiBlockThreadExtents[1]); dim3 const dimGrid(vuiGridBlockExtents[0], vuiGridBlockExtents[1]); MATMUL_CUDA_RT_CHECK(hipSetDevice(0)); hipLaunchKernelGGL(( matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared_kernel), dim3(dimGrid), dim3(dimBlock), 2u*sizeof(TElem)*vuiBlockThreadExtents[0] * vuiBlockThreadExtents[1], stream, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(hipSetDevice(0)); MATMUL_CUDA_RT_CHECK(hipStreamSynchronize(stream)); MATMUL_CUDA_RT_CHECK(hipSetDevice(0)); MATMUL_CUDA_RT_CHECK(hipStreamDestroy(stream)); //MATMUL_CUDA_RT_CHECK(hipDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_DYN_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_dyn_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared); } #endif #endif
a0b2af9e531d59ece62bf1efa7ce5ca46c3d66c2.cu
//----------------------------------------------------------------------------- //! Copyright (c) 2014-2015, Benjamin Worpitz //! All rights reserved. //! //! Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met : //! * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. //! * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. //! * Neither the name of the TU Dresden nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. //! //! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. //! IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) //! HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //----------------------------------------------------------------------------- #if defined(MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE) || defined(MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE) || defined(MATMUL_BUILD_PAR_CUDA_DYN_BLOCK_SIZE) || defined(MATMUL_BUILD_PAR_CUDA_MEMCPY_DYN_BLOCK_SIZE) #include <matmul/par/Cuda.h> #include <matmul/common/Cuda.h> // matmul_gemm_wrap_memcpy_host_cuda_2d #include <matmul/common/Mat.h> // matmul_mat_gemm_early_out #include <cuda_runtime.h> #include <stdio.h> // printf #include <math.h> // ceil #include <algorithm> // std::min #define MATMUL_CUDA_RT_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}} #ifdef MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_fixed_block_size_2d_static_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. 
TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. __shared__ TElem pBlockSharedA[MATMUL_CUDA_FIXED_BLOCK_SIZE][MATMUL_CUDA_FIXED_BLOCK_SIZE]; __shared__ TElem pBlockSharedB[MATMUL_CUDA_FIXED_BLOCK_SIZE][MATMUL_CUDA_FIXED_BLOCK_SIZE]; // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. auto const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k)/static_cast<float>(uiBlockThreadsExtent)))); for(TIdx k2=0; k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiBlockThreadIdxY][uiBlockThreadIdxX] = ((!bInsideA) || (uiAIdxX>=k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiBlockThreadIdxY][uiBlockThreadIdxX] = ((!bInsideB) || (uiBIdxY>=k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for(TIdx k3 = 0; k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY][k3] * pBlockSharedB[k3][uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. 
__syncthreads(); } if(bInsideC) { auto const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_fixed_block_size_2d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } dim3 const dimBlock(MATMUL_CUDA_FIXED_BLOCK_SIZE, MATMUL_CUDA_FIXED_BLOCK_SIZE); float const fGridThreadExtentX = ceil(((float)n) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); float const fGridThreadExtentY = ceil(((float)m) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); unsigned int const uiGridThreadExtentX = (unsigned int)fGridThreadExtentX; unsigned int const uiGridThreadExtentY = (unsigned int)fGridThreadExtentY; dim3 const dimGrid(uiGridThreadExtentX, uiGridThreadExtentY); matmul_gemm_par_cuda_fixed_block_size_2d_static_shared_kernel<<< dimGrid, dimBlock, 0>>>( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(cudaDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_fixed_block_size_2d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_fixed_block_size_2d_static_shared); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_fixed_block_size_1d_static_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. 
__shared__ TElem pBlockSharedA[MATMUL_CUDA_FIXED_BLOCK_SIZE*MATMUL_CUDA_FIXED_BLOCK_SIZE]; __shared__ TElem pBlockSharedB[MATMUL_CUDA_FIXED_BLOCK_SIZE*MATMUL_CUDA_FIXED_BLOCK_SIZE]; auto const uiSharedBlockIdx1d(uiBlockThreadIdxY*uiBlockThreadsExtentX + uiBlockThreadIdxX); // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. auto const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k)/static_cast<float>(uiBlockThreadsExtent)))); for(TIdx k2=0; k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiSharedBlockIdx1d] = ((!bInsideA) || (uiAIdxX>=k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiSharedBlockIdx1d] = ((!bInsideB) || (uiBIdxY>=k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for(TIdx k3 = 0; k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY*uiBlockThreadsExtentX + k3] * pBlockSharedB[k3*uiBlockThreadsExtentY + uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. __syncthreads(); } if(bInsideC) { auto const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_fixed_block_size_1d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } dim3 const dimBlock(MATMUL_CUDA_FIXED_BLOCK_SIZE, MATMUL_CUDA_FIXED_BLOCK_SIZE); float const fGridThreadExtentX = ceil(((float)n) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); float const fGridThreadExtentY = ceil(((float)m) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); unsigned int const uiGridThreadExtentX = (unsigned int)fGridThreadExtentX; unsigned int const uiGridThreadExtentY = (unsigned int)fGridThreadExtentY; dim3 const dimGrid(uiGridThreadExtentX, uiGridThreadExtentY); matmul_gemm_par_cuda_fixed_block_size_1d_static_shared_kernel<<< dimGrid, dimBlock, 0>>>( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(cudaDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_fixed_block_size_1d_static_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const 
MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_fixed_block_size_1d_static_shared); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. extern __shared__ TElem pBlockSharedA[]; auto * const pBlockSharedB(pBlockSharedA + uiBlockThreadsExtentX*uiBlockThreadsExtentY); auto const uiSharedBlockIdx1d(uiBlockThreadIdxY*uiBlockThreadsExtentX + uiBlockThreadIdxX); // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. auto const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k)/static_cast<float>(uiBlockThreadsExtent)))); for(TIdx k2=0; k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiSharedBlockIdx1d] = ((!bInsideA) || (uiAIdxX>=k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiSharedBlockIdx1d] = ((!bInsideB) || (uiBIdxY>=k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for(TIdx k3 = 0; k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY*uiBlockThreadsExtentX + k3] * pBlockSharedB[k3*uiBlockThreadsExtentY + uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. 
__syncthreads(); } if(bInsideC) { auto const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } dim3 const dimBlock(MATMUL_CUDA_FIXED_BLOCK_SIZE, MATMUL_CUDA_FIXED_BLOCK_SIZE); float const fGridThreadExtentX = ceil(((float)n) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); float const fGridThreadExtentY = ceil(((float)m) / ((float)MATMUL_CUDA_FIXED_BLOCK_SIZE)); unsigned int const uiGridThreadExtentX = (unsigned int)fGridThreadExtentX; unsigned int const uiGridThreadExtentY = (unsigned int)fGridThreadExtentY; dim3 const dimGrid(uiGridThreadExtentX, uiGridThreadExtentY); matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared_kernel<<< dimGrid, dimBlock, 2u*sizeof(TElem)*MATMUL_CUDA_FIXED_BLOCK_SIZE*MATMUL_CUDA_FIXED_BLOCK_SIZE>>>( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(cudaDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_FIXED_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_fixed_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_fixed_block_size_1d_extern_shared); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_DYN_BLOCK_SIZE //----------------------------------------------------------------------------- // This function only works for square blocks. //----------------------------------------------------------------------------- __global__ void matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared_kernel( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { // blockIdx.x and blockIdx.y are the indices of the block to calculate inside C. TIdx const uiGridThreadIdxX = blockIdx.x*blockDim.x + threadIdx.x; // Column inside C to calculate. TIdx const uiGridThreadIdxY = blockIdx.y*blockDim.y + threadIdx.y; // Row inside C to calculate. TIdx const uiBlockThreadIdxX = threadIdx.x; // Column inside the block of C to calculate. TIdx const uiBlockThreadIdxY = threadIdx.y; // Row inside the block of C to calculate. TIdx const uiBlockThreadsExtentX = blockDim.x; TIdx const uiBlockThreadsExtentY = blockDim.y; //assert(uiBlockThreadsExtentX == uiBlockThreadsExtentY); TIdx const uiBlockThreadsExtent = uiBlockThreadsExtentX; // Shared memory used to store the current blocks of A and B. 
extern __shared__ TElem pBlockSharedA[]; TElem * const pBlockSharedB(pBlockSharedA + uiBlockThreadsExtentX*uiBlockThreadsExtentY); TIdx const uiSharedBlockIdx1d(uiBlockThreadIdxY*uiBlockThreadsExtentX + uiBlockThreadIdxX); // If the element is outside of the matrix, write zero into the shared block. bool const bInsideA = (uiGridThreadIdxY < m); bool const bInsideB = (uiGridThreadIdxX < n); bool const bInsideC = (bInsideA && bInsideB); TElem dotProduct(0); // Loop over all blocks of A and B that are required to compute the C block. TIdx const uiBlockMulCount( static_cast<TIdx>( ceil( static_cast<float>(k) / static_cast<float>(uiBlockThreadsExtent)))); for (TIdx k2(0); k2<uiBlockMulCount; ++k2) { // Copy data to shared memory. TIdx const uiAIdxX(k2*uiBlockThreadsExtentX + uiBlockThreadIdxX); TIdx const uiAIdx1d(uiGridThreadIdxY*lda + uiAIdxX); pBlockSharedA[uiSharedBlockIdx1d] = ((!bInsideA) || (uiAIdxX >= k)) ? static_cast<TElem>(0) : A[uiAIdx1d]; TIdx const uiBIdxY(k2*uiBlockThreadsExtentY + uiBlockThreadIdxY); TIdx const uiBIdx1d(uiBIdxY*ldb + uiGridThreadIdxX); pBlockSharedB[uiSharedBlockIdx1d] = ((!bInsideB) || (uiBIdxY >= k)) ? static_cast<TElem>(0) : B[uiBIdx1d]; // Synchronize to make sure the sub-matrices are loaded before starting the computation. __syncthreads(); // Dyadic product within shared memory. for (TIdx k3(0); k3<uiBlockThreadsExtent; ++k3) { dotProduct += pBlockSharedA[uiBlockThreadIdxY*uiBlockThreadsExtentX + k3] * pBlockSharedB[k3*uiBlockThreadsExtentY + uiBlockThreadIdxX]; } // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration. __syncthreads(); } if (bInsideC) { TIdx const uiIdxC1d(uiGridThreadIdxY*ldc + uiGridThreadIdxX); C[uiIdxC1d] = alpha * dotProduct + beta * C[uiIdxC1d]; } } //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { if(matmul_mat_gemm_early_out(m, n, k, alpha, beta)) { return; } MATMUL_CUDA_RT_CHECK(cudaSetDevice(0)); cudaStream_t stream; MATMUL_CUDA_RT_CHECK(cudaStreamCreate(&stream)); // Get its properties. cudaDeviceProp cudaDevProp; MATMUL_CUDA_RT_CHECK(cudaGetDeviceProperties( &cudaDevProp, 0)); TIdx vuiGridThreadExtents[] = { m, n }; TIdx vuiBlockThreadExtents[] = { cudaDevProp.maxThreadsDim[0], cudaDevProp.maxThreadsDim[1] }; // Restrict the max block thread extents with the grid thread extents. // This removes dimensions not required in the given grid thread extents. // This has to be done before the uiMaxBlockThreadsCount clipping to get the maximum correctly. for (TIdx i(0); i<2; ++i) { vuiBlockThreadExtents[i] = std::min(vuiBlockThreadExtents[i], vuiGridThreadExtents[i]); } // Restrict it to its minimum component. // For example (512, 256) will get (256, 256). auto uiMinBlockThreadExtent(vuiBlockThreadExtents[0]); for (TIdx i(1); i<2; ++i) { uiMinBlockThreadExtent = std::min(uiMinBlockThreadExtent, vuiBlockThreadExtents[i]); } for (TIdx i(0); i<2; ++i) { vuiBlockThreadExtents[i] = uiMinBlockThreadExtent; } // Adjust vuiBlockThreadExtents if its product is too large. 
if ((vuiBlockThreadExtents[0] * vuiBlockThreadExtents[1]) > cudaDevProp.maxThreadsPerBlock) { // Satisfy the following equation: // udaDevProp.maxThreadsPerBlock >= vuiBlockThreadExtents[0]*vuiBlockThreadExtents[1] // For example 1024 >= 512 * 512 // For equal block thread extent this is easily the nth root of cudaDevProp.maxThreadsPerBlock. double const fNthRoot(std::pow(cudaDevProp.maxThreadsPerBlock, 1.0 / 2.0)); auto const uiNthRoot(static_cast<TIdx>(fNthRoot)); for (TIdx i(0); i<2; ++i) { vuiBlockThreadExtents[i] = uiNthRoot; } } // Set the grid block extents (rounded to the next integer not less then the quotient. TIdx vuiGridBlockExtents[] = { 1, 1 }; for (TIdx i(0); i<2; ++i) { vuiGridBlockExtents[i] = static_cast<TIdx>( std::ceil(static_cast<double>(vuiGridThreadExtents[i]) / static_cast<double>(vuiBlockThreadExtents[i]))); } dim3 const dimBlock(vuiBlockThreadExtents[0], vuiBlockThreadExtents[1]); dim3 const dimGrid(vuiGridBlockExtents[0], vuiGridBlockExtents[1]); MATMUL_CUDA_RT_CHECK(cudaSetDevice(0)); matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared_kernel<<< dimGrid, dimBlock, 2u*sizeof(TElem)*vuiBlockThreadExtents[0] * vuiBlockThreadExtents[1], stream>>>( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); MATMUL_CUDA_RT_CHECK(cudaSetDevice(0)); MATMUL_CUDA_RT_CHECK(cudaStreamSynchronize(stream)); MATMUL_CUDA_RT_CHECK(cudaSetDevice(0)); MATMUL_CUDA_RT_CHECK(cudaStreamDestroy(stream)); //MATMUL_CUDA_RT_CHECK(cudaDeviceSynchronize()); } #endif #ifdef MATMUL_BUILD_PAR_CUDA_MEMCPY_DYN_BLOCK_SIZE //----------------------------------------------------------------------------- // //----------------------------------------------------------------------------- void matmul_gemm_par_cuda_memcpy_dyn_block_size_1d_extern_shared( TIdx const m, TIdx const n, TIdx const k, TElem const alpha, TElem const * const MATMUL_RESTRICT A, TIdx const lda, TElem const * const MATMUL_RESTRICT B, TIdx const ldb, TElem const beta, TElem * const MATMUL_RESTRICT C, TIdx const ldc) { matmul_gemm_wrap_memcpy_host_cuda_2d( m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, matmul_gemm_par_cuda_dyn_block_size_1d_extern_shared); } #endif #endif
d3dc210cdae236fa71cf933ea61d14a91e9f26c5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void reduceSum(int *idata, int *odata, unsigned int ncols)
{
    // Reduce rows to the first element in each row
    int i;
    int blockOffset;
    int rowStartPos;
    int colsPerThread;
    int *mypart;

    // Each block gets a row, each thread will reduce part of a row

    // Calculate the offset of the row
    blockOffset = blockIdx.x * ncols;

    // Calculate our offset into the row
    rowStartPos = threadIdx.x * (ncols/blockDim.x);

    // The number of cols per thread
    colsPerThread = ncols/blockDim.x;

    mypart = idata + blockOffset + rowStartPos;

    // Sum all of the elements in my thread block and put them
    // into the first column spot
    for (i = 1; i < colsPerThread; i++) {
        mypart[0] += mypart[i];
    }

    __syncthreads();  // Wait for everyone to complete

    // Now reduce all of the threads in my block into the first spot for my row
    if(threadIdx.x == 0) {
        odata[blockIdx.x] = 0;
        for(i = 0; i < blockDim.x; i++) {
            odata[blockIdx.x] += mypart[i*colsPerThread];
        }
    }

    // We cant synchronize between blocks, so we will have to start another kernel
}
d3dc210cdae236fa71cf933ea61d14a91e9f26c5.cu
#include "includes.h" __global__ void reduceSum(int *idata, int *odata, unsigned int ncols) { // Reduce rows to the first element in each row int i; int blockOffset; int rowStartPos; int colsPerThread; int *mypart; // Each block gets a row, each thread will reduce part of a row // Calculate the offset of the row blockOffset = blockIdx.x * ncols; // Calculate our offset into the row rowStartPos = threadIdx.x * (ncols/blockDim.x); // The number of cols per thread colsPerThread = ncols/blockDim.x; mypart = idata + blockOffset + rowStartPos; // Sum all of the elements in my thread block and put them // into the first column spot for (i = 1; i < colsPerThread; i++) { mypart[0] += mypart[i]; } __syncthreads(); // Wait for everyone to complete // Now reduce all of the threads in my block into the first spot for my row if(threadIdx.x == 0) { odata[blockIdx.x] = 0; for(i = 0; i < blockDim.x; i++) { odata[blockIdx.x] += mypart[i*colsPerThread]; } } // We cant synchronize between blocks, so we will have to start another kernel }
1d9de72771c5ff4ae9098c708bfbea0db3094939.hip
// !!! This is a file automatically generated by hipify!!!
#include <unistd.h>
#include <iostream>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "fp16_conversion.h"

using namespace std;

// #define FP16MM

const char* cublasGetErrorString(hipblasStatus_t status)
{
    switch(status)
    {
        case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS";
        case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED";
        case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED";
        case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE";
        case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH";
        case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR";
        case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED";
        case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR";
    }
    return "unknown error";
}

// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline hipError_t checkCuda(hipError_t result)
{
    if (result != hipSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
        assert(result == hipSuccess);
    }
    return result;
}

inline hipblasStatus_t checkCublas(hipblasStatus_t result)
{
    if (result != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cublasGetErrorString(result));
        assert(result == HIPBLAS_STATUS_SUCCESS);
    }
    return result;
}

// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on CPU
void CPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A)
{
    int a=1;

    for(int i = 0; i < nr_rows_A * nr_cols_A; i++){
        A[i] = (float)rand()/(float)(RAND_MAX/a);
    }
}

int main(int argc, char ** argv){

    int min_m_k_n = 64;    // min matrix size
    int max_m_k_n = 7744;  // max matrix size
    int repeats = 100;
    int verbose = 1;

#ifndef FP16MM
    cout << "\ncublasSgemm test result:\n" << endl;
#else
    cout << "\ncublasHgemm test result:\n" << endl;
#endif

    if(verbose)
        cout << "running with"
             << " min_m_k_n: " << min_m_k_n
             << " max_m_k_n: " << max_m_k_n
             << " repeats: " << repeats
             << endl;

    hipblasStatus_t stat;
    hipblasHandle_t handle;

    checkCublas(hipblasCreate(&handle));

    if(verbose) cout << "allocating device variables" << endl;

    // Allocate 3 arrays on CPU
    float *h_A = (float *)malloc(max_m_k_n * max_m_k_n * sizeof(float));
    float *h_B = (float *)malloc(max_m_k_n * max_m_k_n * sizeof(float));
    float *h_C = (float *)malloc(max_m_k_n * max_m_k_n * sizeof(float));

    CPU_fill_rand(h_A, max_m_k_n, max_m_k_n);
    CPU_fill_rand(h_B, max_m_k_n, max_m_k_n);
    CPU_fill_rand(h_C, max_m_k_n, max_m_k_n);

#ifndef FP16MM
    // Allocate 3 arrays on GPU
    float *d_A, *d_B, *d_C;
    checkCuda(hipMallocManaged(&d_A, max_m_k_n * max_m_k_n * sizeof(float)));
    checkCuda(hipMallocManaged(&d_B, max_m_k_n * max_m_k_n * sizeof(float)));
    checkCuda(hipMallocManaged(&d_C, max_m_k_n * max_m_k_n * sizeof(float)));

    checkCuda(hipMemcpy(d_A,h_A,max_m_k_n * max_m_k_n * sizeof(float),hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(d_B,h_B,max_m_k_n * max_m_k_n * sizeof(float),hipMemcpyHostToDevice));
    checkCuda(hipMemcpy(d_C,h_C,max_m_k_n * max_m_k_n * sizeof(float),hipMemcpyHostToDevice));

    int lda, ldb, ldc, m, n, k;
    const float alf = 1.0f;
    const float bet = 0.0f;
    const float *alpha = &alf;
    const float *beta = &bet;

#else
    __half *d_A, *d_B, *d_C;
    checkCuda(hipMallocManaged(&d_A, max_m_k_n * max_m_k_n * sizeof(__half)));
    checkCuda(hipMallocManaged(&d_B, max_m_k_n * max_m_k_n * sizeof(__half)));
    checkCuda(hipMallocManaged(&d_C, max_m_k_n * max_m_k_n * sizeof(__half)));

    for (int i = 0; i < max_m_k_n * max_m_k_n; i++) {
        d_A[i] = approx_float_to_half(h_A[i]);
        d_B[i] = approx_float_to_half(h_B[i]);
        d_C[i] = approx_float_to_half(h_C[i]);
    }

    int lda, ldb, ldc, m, n, k;
    const __half alf = approx_float_to_half(1.0);
    const __half bet = approx_float_to_half(0.0);
    const __half *alpha = &alf;
    const __half *beta = &bet;

#endif

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    for(int size = min_m_k_n; size <= max_m_k_n; size=size+64){  // step size

        float sum = 0.0;
        for(int rep = 0; rep < repeats; rep++){

            hipEventRecord(start, 0);

            m=n=k=size;
            lda = m;
            ldb = k;
            ldc = m;

#ifndef FP16MM
            stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc);
#else
            stat = hipblasHgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                                m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc);
#endif

            hipEventRecord(stop,0);
            hipEventSynchronize(stop);
            if(stat != HIPBLAS_STATUS_SUCCESS){
                cerr << "hipblasSgemmBatched failed" << endl;
                exit(1);
            }
            assert(!hipGetLastError());

            float elapsed;
            hipEventElapsedTime(&elapsed, start, stop);
            elapsed /= 1000.0f;
            sum += elapsed;
        }
        float time = sum/repeats;

#ifndef FP16MM
        cout << " matrix (32): "
#else
        cout << " matrix (16): "
#endif
             << size << ", ops: " << endl
             << " average time: " << time << " s "<< endl;
        // GFLOPS: (m*n*k*2/time)/1e9
    }

    //Free GPU memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    // Free CPU memory
    free(h_A);
    free(h_B);
    free(h_C);

    return 0;
}
1d9de72771c5ff4ae9098c708bfbea0db3094939.cu
#include <unistd.h> #include <iostream> #include <stdlib.h> #include <assert.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include "fp16_conversion.h" using namespace std; // #define FP16MM const char* cublasGetErrorString(cublasStatus_t status) { switch(status) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; } return "unknown error"; } // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } return result; } inline cublasStatus_t checkCublas(cublasStatus_t result) { if (result != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "CUDA Runtime Error: %s\n", cublasGetErrorString(result)); assert(result == CUBLAS_STATUS_SUCCESS); } return result; } // Fill the array A(nr_rows_A, nr_cols_A) with random numbers on CPU void CPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) { int a=1; for(int i = 0; i < nr_rows_A * nr_cols_A; i++){ A[i] = (float)rand()/(float)(RAND_MAX/a); } } int main(int argc, char ** argv){ int min_m_k_n = 64; // min matrix size int max_m_k_n = 7744; // max matrix size int repeats = 100; int verbose = 1; #ifndef FP16MM cout << "\ncublasSgemm test result:\n" << endl; #else cout << "\ncublasHgemm test result:\n" << endl; #endif if(verbose) cout << "running with" << " min_m_k_n: " << min_m_k_n << " max_m_k_n: " << max_m_k_n << " repeats: " << repeats << endl; cublasStatus_t stat; cublasHandle_t handle; checkCublas(cublasCreate(&handle)); if(verbose) cout << "allocating device variables" << endl; // Allocate 3 arrays on CPU float *h_A = (float *)malloc(max_m_k_n * max_m_k_n * sizeof(float)); float *h_B = (float *)malloc(max_m_k_n * max_m_k_n * sizeof(float)); float *h_C = (float *)malloc(max_m_k_n * max_m_k_n * sizeof(float)); CPU_fill_rand(h_A, max_m_k_n, max_m_k_n); CPU_fill_rand(h_B, max_m_k_n, max_m_k_n); CPU_fill_rand(h_C, max_m_k_n, max_m_k_n); #ifndef FP16MM // Allocate 3 arrays on GPU float *d_A, *d_B, *d_C; checkCuda(cudaMallocManaged(&d_A, max_m_k_n * max_m_k_n * sizeof(float))); checkCuda(cudaMallocManaged(&d_B, max_m_k_n * max_m_k_n * sizeof(float))); checkCuda(cudaMallocManaged(&d_C, max_m_k_n * max_m_k_n * sizeof(float))); checkCuda(cudaMemcpy(d_A,h_A,max_m_k_n * max_m_k_n * sizeof(float),cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(d_B,h_B,max_m_k_n * max_m_k_n * sizeof(float),cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(d_C,h_C,max_m_k_n * max_m_k_n * sizeof(float),cudaMemcpyHostToDevice)); int lda, ldb, ldc, m, n, k; const float alf = 1.0f; const float bet = 0.0f; const float *alpha = &alf; const float *beta = &bet; #else __half *d_A, *d_B, *d_C; checkCuda(cudaMallocManaged(&d_A, max_m_k_n * max_m_k_n * sizeof(__half))); checkCuda(cudaMallocManaged(&d_B, max_m_k_n * max_m_k_n * sizeof(__half))); checkCuda(cudaMallocManaged(&d_C, max_m_k_n * max_m_k_n * sizeof(__half))); for (int i 
= 0; i < max_m_k_n * max_m_k_n; i++) { d_A[i] = approx_float_to_half(h_A[i]); d_B[i] = approx_float_to_half(h_B[i]); d_C[i] = approx_float_to_half(h_C[i]); } int lda, ldb, ldc, m, n, k; const __half alf = approx_float_to_half(1.0); const __half bet = approx_float_to_half(0.0); const __half *alpha = &alf; const __half *beta = &bet; #endif cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); for(int size = min_m_k_n; size <= max_m_k_n; size=size+64){ // step size float sum = 0.0; for(int rep = 0; rep < repeats; rep++){ cudaEventRecord(start, 0); m=n=k=size; lda = m; ldb = k; ldc = m; #ifndef FP16MM stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc); #else stat = cublasHgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc); #endif cudaEventRecord(stop,0); cudaEventSynchronize(stop); if(stat != CUBLAS_STATUS_SUCCESS){ cerr << "cublasSgemmBatched failed" << endl; exit(1); } assert(!cudaGetLastError()); float elapsed; cudaEventElapsedTime(&elapsed, start, stop); elapsed /= 1000.0f; sum += elapsed; } float time = sum/repeats; #ifndef FP16MM cout << " matrix (32): " #else cout << " matrix (16): " #endif << size << ", ops: " << endl << " average time: " << time << " s "<< endl; // GFLOPS: (m*n*k*2/time)/1e9 } //Free GPU memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // Free CPU memory free(h_A); free(h_B); free(h_C); return 0; }
37c81da037ebed2eae064245c10993ff676766f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag> __global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) out[i] = static_cast<T>(0); else out[i] = tab[i]; } else { out[i] = tab[i]; } } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. 
Please check input value.", N, id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } idy += BlockDimY * GridDimX; } } template <typename T> class LookupTableCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<LoDTensor>("W"); auto *ids_t = context.Input<LoDTensor>("Ids"); auto *output_t = context.Output<LoDTensor>("Out"); int64_t padding_idx = context.Attr<int64_t>("padding_idx"); auto id_name = context.InputNames("Ids").front(); auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); auto *ids = ids_t->data<int64_t>(); auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP if (padding_idx == -1) hipLaunchKernelGGL(( LookupTable< T, 64, 4, 8, false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); else hipLaunchKernelGGL(( LookupTable< T, 64, 4, 8, true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); #else if (padding_idx == -1) hipLaunchKernelGGL(( LookupTable< T, 128, 8, 8, false>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); else hipLaunchKernelGGL(( LookupTable< T, 128, 8, 8, true>), dim3(grids), dim3(threads), 0, context.cuda_device_context().stream(), output, table, ids, N, K, D, padding_idx); #endif // PADDLE_WITH_HIP } }; template <typename T> class LookupTableGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); bool is_sparse = context.Attr<bool>("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { auto *ids = context.Input<LoDTensor>("Ids"); auto *table = context.Input<LoDTensor>("W"); auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out")); auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W")); auto *ids_data = ids->data<int64_t>(); int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace()); // TODO(yuyang18): Strange code here. memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data<T>(context.GetPlace()); auto *d_table_data = d_table_value->data<T>(); auto *d_output_data = d_output->data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, platform::errors::InvalidArgument( "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. 
" "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d)); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input<LoDTensor>("Ids"); auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t *ids = ids_t->data<int64_t>(); const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(( LookupTableGrad<T, 64, 4, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids, N, K, D); #else hipLaunchKernelGGL(( LookupTableGrad<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), d_table, d_output, ids, N, K, D); #endif // PADDLE_WITH_HIP } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>, ops::LookupTableCUDAKernel<double>, ops::LookupTableCUDAKernel<plat::float16>, ops::LookupTableCUDAKernel<int8_t>, ops::LookupTableCUDAKernel<int16_t>); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel<float>, ops::LookupTableGradCUDAKernel<double>, ops::LookupTableGradCUDAKernel<plat::float16>);
37c81da037ebed2eae064245c10993ff676766f4.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, int BlockDimX, int BlockDimY, int GridDimX, bool PaddingFlag> __global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) out[i] = static_cast<T>(0); else out[i] = tab[i]; } else { out[i] = tab[i]; } } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE( id >= 0, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. Please check input value.", N, id); PADDLE_ENFORCE( id < N, "Variable value (input) of OP(fluid.layers.embedding) " "expected >= 0 and < %ld, but got %ld. 
Please check input value.", N, id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } idy += BlockDimY * GridDimX; } } template <typename T> class LookupTableCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *table_t = context.Input<LoDTensor>("W"); auto *ids_t = context.Input<LoDTensor>("Ids"); auto *output_t = context.Output<LoDTensor>("Out"); int64_t padding_idx = context.Attr<int64_t>("padding_idx"); auto id_name = context.InputNames("Ids").front(); auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); auto *ids = ids_t->data<int64_t>(); auto *table = table_t->data<T>(); auto *output = output_t->mutable_data<T>(context.GetPlace()); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP if (padding_idx == -1) LookupTable< T, 64, 4, 8, false><<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); else LookupTable< T, 64, 4, 8, true><<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); #else if (padding_idx == -1) LookupTable< T, 128, 8, 8, false><<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); else LookupTable< T, 128, 8, 8, true><<<grids, threads, 0, context.cuda_device_context().stream()>>>( output, table, ids, N, K, D, padding_idx); #endif // PADDLE_WITH_HIP } }; template <typename T> class LookupTableGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto &dev_ctx = context.template device_context<platform::CUDADeviceContext>(); bool is_sparse = context.Attr<bool>("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { auto *ids = context.Input<LoDTensor>("Ids"); auto *table = context.Input<LoDTensor>("W"); auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out")); auto *d_table = context.Output<SelectedRows>(framework::GradVarName("W")); auto *ids_data = ids->data<int64_t>(); int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace()); // TODO(yuyang18): Strange code here. memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data<T>(context.GetPlace()); auto *d_table_data = d_table_value->data<T>(); auto *d_output_data = d_output->data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, platform::errors::InvalidArgument( "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. 
" "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d)); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input<LoDTensor>("Ids"); auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out")); auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); const int64_t *ids = ids_t->data<int64_t>(); const T *d_output = d_output_t->data<T>(); T *d_table = d_table_t->mutable_data<T>(context.GetPlace()); auto t = framework::EigenVector<T>::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); #ifdef PADDLE_WITH_HIP dim3 threads(64, 4); #else dim3 threads(128, 8); #endif // PADDLE_WITH_HIP dim3 grids(8, 1); #ifdef PADDLE_WITH_HIP LookupTableGrad<T, 64, 4, 8><<<grids, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids, N, K, D); #else LookupTableGrad<T, 128, 8, 8><<<grids, threads, 0, dev_ctx.stream()>>>( d_table, d_output, ids, N, K, D); #endif // PADDLE_WITH_HIP } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel<float>, ops::LookupTableCUDAKernel<double>, ops::LookupTableCUDAKernel<plat::float16>, ops::LookupTableCUDAKernel<int8_t>, ops::LookupTableCUDAKernel<int16_t>); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel<float>, ops::LookupTableGradCUDAKernel<double>, ops::LookupTableGradCUDAKernel<plat::float16>);
f9a5fee61fa3b0f09103572cc09d25f4c178add9.hip
// !!! This is a file automatically generated by hipify!!! #include "pyramid.cuh" // cuda includes #include <cuda/cuda_helper.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #include "../helper_cuda.h" #include <iostream> #include <cstdio> GPUPyramid::GPUPyramid( int W, int H, int pyramidHeight, Texture *texture ) : _W( W ), _H( H ), _pyramidHeight(pyramidHeight) { init(texture); } GPUPyramid::~GPUPyramid() { for(uint i = 0 ; i < _gaussianPyramidTex.size() ; ++i) { if(_gaussianPyramidTex[i] != 0) { delete _gaussianPyramidTex[i]; _gaussianPyramidTex[i] = 0; } } for(uint i = 0 ; i < _gaussianPyramidArray.size() ; ++i) { if(_gaussianPyramidArray[i] != 0) { CUDA_SAFE_CALL( hipFree( _gaussianPyramidArray[i] )); _gaussianPyramidArray[i] = 0; } } for(uint i = 0 ; i < _laplacianPyramidTex.size() ; ++i) { if(_laplacianPyramidTex[i] != 0) { delete _laplacianPyramidTex[i]; _laplacianPyramidTex[i] = 0; } } for(uint i = 0 ; i < _laplacianPyramidArray.size() ; ++i) { if(_laplacianPyramidArray[i] != 0) { CUDA_SAFE_CALL( hipFree( _laplacianPyramidArray[i] )); _laplacianPyramidArray[i] = 0; } } } void GPUPyramid::init(Texture *texture) { hipGLSetGLDevice(gpuGetMaxGflopsDeviceId()); const uint nbChannels = 3; assert(texture->getInternalFormat() == GL_RGB); unsigned char *charBuffer = new unsigned char[nbChannels*_W*_H]; float *floatBuffer; CUDA_SAFE_CALL( hipMalloc( &floatBuffer, nbChannels*_W*_H*sizeof(float) )); CUDA_SAFE_CALL( hipMemset( floatBuffer, 0, nbChannels*_W*_H*sizeof(float) )); // TODO get texture from openGL to CUDA cudaGraphicsResource *resData; checkCudaErrors( hipGraphicsGLRegisterImage(&resData, texture->getID(), GL_TEXTURE_RECTANGLE, hipGraphicsRegisterFlagsReadOnly) ); glBindTexture(GL_TEXTURE_RECTANGLE, texture->getID()); glGetTexImage( GL_TEXTURE_RECTANGLE, 0, texture->getFormat(), texture->getType(), charBuffer ); glBindTexture(GL_TEXTURE_RECTANGLE, 0); for(uint i = 0 ; i < nbChannels*_W*_H ; ++i) { floatBuffer[i] = (float)charBuffer[i] / 255.0f; } delete[] charBuffer; _gaussianPyramidArray.push_back(floatBuffer); Texture* originalTexture = new Texture(0, _W, _H, GL_RGB, GL_FLOAT, GL_RGB32F, false); // TODO load openGL texture from CUDA originalTexture->loadFromData(_gaussianPyramidArray[0]); _gaussianPyramidTex.push_back(originalTexture); // for every scale. don't go to max scale // uint s = 0; // while(_W / (uint)pow(2.0, (double)s) > 0 && _H / (uint)pow(2.0, (double)s) > 0) { for(uint s = 1 ; s <= (uint)_pyramidHeight ; ++s) { float *gaussianArray; Texture* gaussianTex = new Texture(0, _W, _H, GL_RGB, GL_FLOAT, GL_RGB32F, false); CUDA_SAFE_CALL( hipMalloc( &gaussianArray, nbChannels*_W*_H*sizeof(float) )); CUDA_SAFE_CALL( hipMemset( gaussianArray, 0, nbChannels*_W*_H*sizeof(float) )); _gaussianPyramidArray.push_back(gaussianArray); // TODO oddHDC(_W, _H, nbChannels, s); // TODO load openGL texture from CUDA gaussianTex->loadFromData(_gaussianPyramidArray[s]); _gaussianPyramidTex.push_back(gaussianTex); float *laplacianArray; Texture* laplacianTex = new Texture(0, _W, _H, GL_RGB, GL_FLOAT, GL_RGB32F, false); CUDA_SAFE_CALL( hipMalloc( &laplacianArray, nbChannels*_W*_H*sizeof(float) )); CUDA_SAFE_CALL( hipMemset( laplacianArray, 0, nbChannels*_W*_H*sizeof(float) )); _laplacianPyramidArray.push_back(laplacianArray); // TODO dog(_W, _H, nbChannels, s-1); // TODO load openGL texture from CUDA laplacianTex->loadFromData(_laplacianPyramidArray[s-1]); _laplacianPyramidTex.push_back(laplacianTex); } }
f9a5fee61fa3b0f09103572cc09d25f4c178add9.cu
#include "pyramid.cuh" // cuda includes #include <cuda/cuda_helper.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> #include "../helper_cuda.h" #include <iostream> #include <cstdio> GPUPyramid::GPUPyramid( int W, int H, int pyramidHeight, Texture *texture ) : _W( W ), _H( H ), _pyramidHeight(pyramidHeight) { init(texture); } GPUPyramid::~GPUPyramid() { for(uint i = 0 ; i < _gaussianPyramidTex.size() ; ++i) { if(_gaussianPyramidTex[i] != 0) { delete _gaussianPyramidTex[i]; _gaussianPyramidTex[i] = 0; } } for(uint i = 0 ; i < _gaussianPyramidArray.size() ; ++i) { if(_gaussianPyramidArray[i] != 0) { CUDA_SAFE_CALL( cudaFree( _gaussianPyramidArray[i] )); _gaussianPyramidArray[i] = 0; } } for(uint i = 0 ; i < _laplacianPyramidTex.size() ; ++i) { if(_laplacianPyramidTex[i] != 0) { delete _laplacianPyramidTex[i]; _laplacianPyramidTex[i] = 0; } } for(uint i = 0 ; i < _laplacianPyramidArray.size() ; ++i) { if(_laplacianPyramidArray[i] != 0) { CUDA_SAFE_CALL( cudaFree( _laplacianPyramidArray[i] )); _laplacianPyramidArray[i] = 0; } } } void GPUPyramid::init(Texture *texture) { cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId()); const uint nbChannels = 3; assert(texture->getInternalFormat() == GL_RGB); unsigned char *charBuffer = new unsigned char[nbChannels*_W*_H]; float *floatBuffer; CUDA_SAFE_CALL( cudaMalloc( &floatBuffer, nbChannels*_W*_H*sizeof(float) )); CUDA_SAFE_CALL( cudaMemset( floatBuffer, 0, nbChannels*_W*_H*sizeof(float) )); // TODO get texture from openGL to CUDA cudaGraphicsResource *resData; checkCudaErrors( cudaGraphicsGLRegisterImage(&resData, texture->getID(), GL_TEXTURE_RECTANGLE, cudaGraphicsRegisterFlagsReadOnly) ); glBindTexture(GL_TEXTURE_RECTANGLE, texture->getID()); glGetTexImage( GL_TEXTURE_RECTANGLE, 0, texture->getFormat(), texture->getType(), charBuffer ); glBindTexture(GL_TEXTURE_RECTANGLE, 0); for(uint i = 0 ; i < nbChannels*_W*_H ; ++i) { floatBuffer[i] = (float)charBuffer[i] / 255.0f; } delete[] charBuffer; _gaussianPyramidArray.push_back(floatBuffer); Texture* originalTexture = new Texture(0, _W, _H, GL_RGB, GL_FLOAT, GL_RGB32F, false); // TODO load openGL texture from CUDA originalTexture->loadFromData(_gaussianPyramidArray[0]); _gaussianPyramidTex.push_back(originalTexture); // for every scale. don't go to max scale // uint s = 0; // while(_W / (uint)pow(2.0, (double)s) > 0 && _H / (uint)pow(2.0, (double)s) > 0) { for(uint s = 1 ; s <= (uint)_pyramidHeight ; ++s) { float *gaussianArray; Texture* gaussianTex = new Texture(0, _W, _H, GL_RGB, GL_FLOAT, GL_RGB32F, false); CUDA_SAFE_CALL( cudaMalloc( &gaussianArray, nbChannels*_W*_H*sizeof(float) )); CUDA_SAFE_CALL( cudaMemset( gaussianArray, 0, nbChannels*_W*_H*sizeof(float) )); _gaussianPyramidArray.push_back(gaussianArray); // TODO oddHDC(_W, _H, nbChannels, s); // TODO load openGL texture from CUDA gaussianTex->loadFromData(_gaussianPyramidArray[s]); _gaussianPyramidTex.push_back(gaussianTex); float *laplacianArray; Texture* laplacianTex = new Texture(0, _W, _H, GL_RGB, GL_FLOAT, GL_RGB32F, false); CUDA_SAFE_CALL( cudaMalloc( &laplacianArray, nbChannels*_W*_H*sizeof(float) )); CUDA_SAFE_CALL( cudaMemset( laplacianArray, 0, nbChannels*_W*_H*sizeof(float) )); _laplacianPyramidArray.push_back(laplacianArray); // TODO dog(_W, _H, nbChannels, s-1); // TODO load openGL texture from CUDA laplacianTex->loadFromData(_laplacianPyramidArray[s-1]); _laplacianPyramidTex.push_back(laplacianTex); } }
d0e941d597ba89908c553032e7b79ed0e48b1fd5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> int main() { int dimx = 16; int num_bytes = dimx * sizeof( int ); int *d_a = 0, *h_a = 0; //device and host pointers h_a = (int *) malloc( num_bytes ); // allocate memory on the GPU hipMalloc( (void **) &d_a, num_bytes ); if( 0 == h_a || 0 == d_a ) { printf("couldn't allocate memory\n"); return 911; } /* end if */ // memset on the gpu hipMemset( d_a, 0, num_bytes ); // hipMemcpy( h_a, d_a, num_bytes, hipMemcpyDeviceToHost ); for( int i = 0; i < dimx; i++ ) { printf("%d ", h_a[i] ); } printf("\n"); free( h_a ); hipFree( d_a ); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. if ( hipDeviceReset() != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; }
d0e941d597ba89908c553032e7b79ed0e48b1fd5.cu
#include "cuda_runtime.h" #include <stdio.h> int main() { int dimx = 16; int num_bytes = dimx * sizeof( int ); int *d_a = 0, *h_a = 0; //device and host pointers h_a = (int *) malloc( num_bytes ); // allocate memory on the GPU cudaMalloc( (void **) &d_a, num_bytes ); if( 0 == h_a || 0 == d_a ) { printf("couldn't allocate memory\n"); return 911; } /* end if */ // memset on the gpu cudaMemset( d_a, 0, num_bytes ); // cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost ); for( int i = 0; i < dimx; i++ ) { printf("%d ", h_a[i] ); } printf("\n"); free( h_a ); cudaFree( d_a ); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. if ( cudaDeviceReset() != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }
56b40e18478fd4e4023c8411fac88444700a7a44.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include "filtering_cuda.cuh"
#include "../image_buffer.h"
#include "../image_exception.h"

namespace Image_Function_Cuda
{
    namespace Filtering
    {
        Image Gaussian( const Image & in, uint32_t kernelSize, float sigma )
        {
            Image_Function::ParameterValidation( in );

            Image out( in.width(), in.height() );

            Gaussian( in, out, kernelSize, sigma );

            return out;
        }

        void Gaussian( const Image & in, Image & out, uint32_t kernelSize, float sigma )
        {
            Image_Function::ParameterValidation( in, out );

            if( sigma < 0 )
                throw imageException( "Sigma value cannot be negative" );

            FFT_Cuda::ComplexData image( in );
            FFT_Cuda::ComplexData filter = GetGaussianKernel( in.width(), in.height(), kernelSize, sigma );

            FFT_Cuda::FFTExecutor executor( in.width(), in.height() );

            executor.directTransform( image );
            executor.directTransform( filter );

            executor.complexMultiplication( image, filter, image );

            executor.inverseTransform( image );

            out = image.get();
        }

        FFT_Cuda::ComplexData GetGaussianKernel( uint32_t width, uint32_t height, uint32_t kernelSize, float sigma )
        {
            if( width < 3 || height < 3 || kernelSize == 0 ||
                width < (kernelSize * 2 + 1) || height < (kernelSize * 2 + 1) || sigma < 0 )
                throw imageException( "Incorrect input parameters for Gaussian filter kernel" );

            const uint32_t size = width * height;

            std::vector<float> data( size, 0 );

            static const float pi = 3.1415926536f;
            const float doubleSigma = sigma * 2;

            float * y = data.data() + (height / 2 - kernelSize) * width + width / 2 - kernelSize;
            const float * endY = y + (2 * kernelSize + 1) * width;

            float sum = 0;

            for( int32_t posY = -static_cast<int32_t>(kernelSize) ; y != endY; y += width, ++posY ) {
                float * x = y;
                const float * endX = x + 2 * kernelSize + 1;

                for( int32_t posX = -static_cast<int32_t>(kernelSize) ; x != endX; ++x, ++posX ) {
                    *x = 1.0f / (pi * doubleSigma) * exp( -(posX * posX + posY * posY) / doubleSigma );
                    sum += *x;
                }
            }

            const float normalization = 1.0f / sum;

            y = data.data() + (height / 2 - kernelSize) * width + width / 2 - kernelSize;

            for( int32_t posY = -static_cast<int32_t>(kernelSize) ; y != endY; y += width, ++posY ) {
                float * x = y;
                const float * endX = x + 2 * kernelSize + 1;

                for( int32_t posX = -static_cast<int32_t>(kernelSize) ; x != endX; ++x, ++posX ) {
                    *x *= normalization;
                }
            }

            Cuda_Types::Array<float> cudaData( data );

            FFT_Cuda::ComplexData complexData;
            complexData.resize( width, height );
            complexData.set( cudaData );

            return complexData;
        }
    }
}
56b40e18478fd4e4023c8411fac88444700a7a44.cu
#include <cuda_runtime.h> #include <math.h> #include "filtering_cuda.cuh" #include "../image_buffer.h" #include "../image_exception.h" namespace Image_Function_Cuda { namespace Filtering { Image Gaussian( const Image & in, uint32_t kernelSize, float sigma ) { Image_Function::ParameterValidation( in ); Image out( in.width(), in.height() ); Gaussian( in, out, kernelSize, sigma ); return out; } void Gaussian( const Image & in, Image & out, uint32_t kernelSize, float sigma ) { Image_Function::ParameterValidation( in, out ); if( sigma < 0 ) throw imageException( "Sigma value cannot be negative" ); FFT_Cuda::ComplexData image( in ); FFT_Cuda::ComplexData filter = GetGaussianKernel( in.width(), in.height(), kernelSize, sigma ); FFT_Cuda::FFTExecutor executor( in.width(), in.height() ); executor.directTransform( image ); executor.directTransform( filter ); executor.complexMultiplication( image, filter, image ); executor.inverseTransform( image ); out = image.get(); } FFT_Cuda::ComplexData GetGaussianKernel( uint32_t width, uint32_t height, uint32_t kernelSize, float sigma ) { if( width < 3 || height < 3 || kernelSize == 0 || width < (kernelSize * 2 + 1) || height < (kernelSize * 2 + 1) || sigma < 0 ) throw imageException( "Incorrect input parameters for Gaussian filter kernel" ); const uint32_t size = width * height; std::vector<float> data( size, 0 ); static const float pi = 3.1415926536f; const float doubleSigma = sigma * 2; float * y = data.data() + (height / 2 - kernelSize) * width + width / 2 - kernelSize; const float * endY = y + (2 * kernelSize + 1) * width; float sum = 0; for( int32_t posY = -static_cast<int32_t>(kernelSize) ; y != endY; y += width, ++posY ) { float * x = y; const float * endX = x + 2 * kernelSize + 1; for( int32_t posX = -static_cast<int32_t>(kernelSize) ; x != endX; ++x, ++posX ) { *x = 1.0f / (pi * doubleSigma) * exp( -(posX * posX + posY * posY) / doubleSigma ); sum += *x; } } const float normalization = 1.0f / sum; y = data.data() + (height / 2 - kernelSize) * width + width / 2 - kernelSize; for( int32_t posY = -static_cast<int32_t>(kernelSize) ; y != endY; y += width, ++posY ) { float * x = y; const float * endX = x + 2 * kernelSize + 1; for( int32_t posX = -static_cast<int32_t>(kernelSize) ; x != endX; ++x, ++posX ) { *x *= normalization; } } Cuda_Types::Array<float> cudaData( data ); FFT_Cuda::ComplexData complexData; complexData.resize( width, height ); complexData.set( cudaData ); return complexData; } } }
98a074537e93120f626c9a53ce23fa79194794ec.hip
// !!! This is a file automatically generated by hipify!!! //jacobi7.cu #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <math.h> #include "jacobi7_cuda_glmem.h" #include "jacobi7.h" #ifdef __GNUC__ #include <getopt.h> #include <sys/time.h> // Timer function double rtclock(){ struct timeval tp; gettimeofday(&tp, NULL); return (tp.tv_sec + tp.tv_usec*1.0e-6); } #endif // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } int main(int argc, char* *argv){ if(argc != 8) { printf("USAGE: %s <0row_or_1col_first> <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]); return 1; } // program parameters trans const int row_or_col = atoi(argv[1]); const int nx = atoi(argv[2]); const int ny = atoi(argv[3]); const int nz = atoi(argv[4]); const int tx = atoi(argv[5]); const int ty = atoi(argv[6]); const int timesteps = atoi(argv[7]); void (*kernel)(float *, float *, const int , const int , const int , float ); // the first arg determins the row first or column first // 0: row first; 1: column first if (row_or_col == 0) kernel = &jacobi3d_7p_glmem; if (row_or_col == 1) kernel = &jacobi3d_7p_glmem_col; if (row_or_col == 2) kernel = &jacobi3d_7p_glmem_col_row; const int xyz = nx * ny * nz; const int xyz_bytes = xyz * sizeof(float); float *h_dA, *h_dA1; float *h_dB, *h_dB1; float *d_dA; float *d_dB; // Allocate host buffers checkCuda(hipHostMalloc((void**)&h_dA, xyz_bytes)); // host pinned checkCuda(hipHostMalloc((void**)&h_dB, xyz_bytes)); // for comparison btw CPU and GPU version checkCuda(hipHostMalloc((void**)&h_dA1, xyz_bytes)); checkCuda(hipHostMalloc((void**)&h_dB1, xyz_bytes)); // grid data iniatialization // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_dA[i] = 1 + (float)rand() / (float)RAND_MAX; h_dA1[i] = h_dB1[i] = h_dB[i] = h_dA[i]; } // A simple comparison of the result int testIndex = 3 + 3*nx+ 3*nx*ny; printf("Iniatialized data[%d]=%f\n", testIndex , h_dA[testIndex]); printf("h_dA1[%d]=%f\n", testIndex, h_dA1[testIndex]); printf("h_dB1[%d]=%f\n", testIndex, h_dB1[testIndex]); printf("Start computing... 
\n"); // Always use device 0 checkCuda(hipSetDevice(0)); /* set the ratio of cache/shared memory hipFuncCachePreferNone: Default function cache configuration, no preference hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory */ //checkCuda(hipDeviceSetCacheConfig(hipFuncCachePreferShared)); // Allocate device buffers checkCuda(hipMalloc((void**)&d_dA, xyz_bytes)); checkCuda(hipMalloc((void**)&d_dB, xyz_bytes)); hipEvent_t start, stop; checkCuda(hipEventCreate(&start)); checkCuda(hipEventCreate(&stop)); float milliseconds = 0; checkCuda(hipEventRecord(start)); // Copy to device checkCuda(hipMemcpy(d_dA, h_dA, xyz_bytes, hipMemcpyHostToDevice)); checkCuda(hipEventRecord(stop)); checkCuda(hipEventSynchronize(stop)); checkCuda(hipEventElapsedTime(&milliseconds, start, stop)); printf("Data %dMB transferred H2D time:%f ms\n", xyz_bytes >> 20, milliseconds); printf("Bandwidth H2D:%f MB/s\n", (float)(xyz_bytes >> 20)/(milliseconds/1000)); checkCuda(hipMemcpy(d_dB, d_dA, xyz_bytes, hipMemcpyDeviceToDevice)); // Setup the kernel float* input = d_dA; float* output = d_dB; // modify nx/tx and ny/ty to (nx+tx-1)/tx and (ny+ty-1)/ty // inorder to avoid wrong configuration dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty); dim3 block(tx, ty); printf("grid:(%d, %d)\n", grid.x, grid.y); printf("block:(%d, %d)\n", tx, ty); float *tmp; float fac = 6.0/(h_dA[0] * h_dA[0]); checkCuda(hipEventRecord(start)); for(int t = 0; t < timesteps; t += 1) { hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, input, output, nx, ny, nz, fac); tmp = input; input = output; output = tmp; } checkCuda(hipEventRecord(stop)); checkCuda(hipEventSynchronize(stop)); checkCuda(hipEventElapsedTime(&milliseconds, start, stop)); // Output the time and GFLOPs of the pure GPU kernel printf("GPU kernel Elapsed Time (pure GPU):%f ms\n", milliseconds); double gflop = (xyz * 1e-9) * 7.0 * timesteps; double gflop_per_sec = gflop * 1e3 / milliseconds; printf("(GPU) %lf GFlop/s\n", gflop_per_sec); double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / milliseconds; printf("(GPU) %lf M updates/s\n", mupdate_per_sec); // Copy the result to main memory checkCuda(hipEventRecord(start)); checkCuda(hipMemcpy(h_dB, input, xyz_bytes, hipMemcpyDeviceToHost)); checkCuda(hipEventRecord(stop)); checkCuda(hipEventSynchronize(stop)); checkCuda(hipEventElapsedTime(&milliseconds, start, stop)); printf("Data %dMB transferred D2H time:%f ms\n", xyz_bytes >> 20, milliseconds); printf("Bandwidth D2H:%f MB/s\n", (float)(xyz_bytes >> 20)/(milliseconds/1000)); float *gpuResult = h_dB; float *tmp1; for(int t = 0; t < timesteps; t += 1) { jacobi7(nx, ny, nz, h_dA1, h_dB1, fac); tmp1 = h_dA1; h_dA1 = h_dB1; h_dB1 = tmp1; } float *cpuResult = h_dA1; // compare the results btw CPU and GPU version double errorNorm, refNorm, diff; errorNorm = 0.0; refNorm = 0.0; i = 0; for (; i < xyz; ++i){ diff = cpuResult[i] - gpuResult[i]; errorNorm += diff * diff; refNorm += cpuResult[i] * cpuResult[i]; } errorNorm = sqrt(errorNorm); refNorm = sqrt(refNorm); printf("Error Norm:%lf\n", errorNorm); printf("Ref Norm:%lf\n", refNorm); if(abs(refNorm) < 1e-7) { printf("Correctness, FAILED\n"); } else if((errorNorm / refNorm) > 1e-2) { printf("Correctness, FAILED\n"); } else { printf("Correctness, PASSED\n"); } printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]); printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]); printf("h_dA[%d]=%f\n", testIndex, h_dA[testIndex]); printf("h_dB[%d]=%f\n", 
testIndex, h_dB[testIndex]); printf("h_dA1[%d]=%f\n", testIndex, h_dA1[testIndex]); printf("h_dB1[%d]=%f\n", testIndex, h_dB1[testIndex]); // cleanup checkCuda( hipEventDestroy(start)); checkCuda( hipEventDestroy(stop)); hipHostFree(h_dA); hipHostFree(h_dB); hipHostFree(h_dA1); hipHostFree(h_dB1); hipFree(d_dA); hipFree(d_dB); return 0; }
98a074537e93120f626c9a53ce23fa79194794ec.cu
//jacobi7.cu #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <stdlib.h> #include <math.h> #include "jacobi7_cuda_glmem.h" #include "jacobi7.h" #ifdef __GNUC__ #include <getopt.h> #include <sys/time.h> // Timer function double rtclock(){ struct timeval tp; gettimeofday(&tp, NULL); return (tp.tv_sec + tp.tv_usec*1.0e-6); } #endif // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } int main(int argc, char* *argv){ if(argc != 8) { printf("USAGE: %s <0row_or_1col_first> <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]); return 1; } // program parameters trans const int row_or_col = atoi(argv[1]); const int nx = atoi(argv[2]); const int ny = atoi(argv[3]); const int nz = atoi(argv[4]); const int tx = atoi(argv[5]); const int ty = atoi(argv[6]); const int timesteps = atoi(argv[7]); void (*kernel)(float *, float *, const int , const int , const int , float ); // the first arg determins the row first or column first // 0: row first; 1: column first if (row_or_col == 0) kernel = &jacobi3d_7p_glmem; if (row_or_col == 1) kernel = &jacobi3d_7p_glmem_col; if (row_or_col == 2) kernel = &jacobi3d_7p_glmem_col_row; const int xyz = nx * ny * nz; const int xyz_bytes = xyz * sizeof(float); float *h_dA, *h_dA1; float *h_dB, *h_dB1; float *d_dA; float *d_dB; // Allocate host buffers checkCuda(cudaMallocHost((void**)&h_dA, xyz_bytes)); // host pinned checkCuda(cudaMallocHost((void**)&h_dB, xyz_bytes)); // for comparison btw CPU and GPU version checkCuda(cudaMallocHost((void**)&h_dA1, xyz_bytes)); checkCuda(cudaMallocHost((void**)&h_dB1, xyz_bytes)); // grid data iniatialization // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_dA[i] = 1 + (float)rand() / (float)RAND_MAX; h_dA1[i] = h_dB1[i] = h_dB[i] = h_dA[i]; } // A simple comparison of the result int testIndex = 3 + 3*nx+ 3*nx*ny; printf("Iniatialized data[%d]=%f\n", testIndex , h_dA[testIndex]); printf("h_dA1[%d]=%f\n", testIndex, h_dA1[testIndex]); printf("h_dB1[%d]=%f\n", testIndex, h_dB1[testIndex]); printf("Start computing... 
\n"); // Always use device 0 checkCuda(cudaSetDevice(0)); /* set the ratio of cache/shared memory cudaFuncCachePreferNone: Default function cache configuration, no preference cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory */ //checkCuda(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared)); // Allocate device buffers checkCuda(cudaMalloc((void**)&d_dA, xyz_bytes)); checkCuda(cudaMalloc((void**)&d_dB, xyz_bytes)); cudaEvent_t start, stop; checkCuda(cudaEventCreate(&start)); checkCuda(cudaEventCreate(&stop)); float milliseconds = 0; checkCuda(cudaEventRecord(start)); // Copy to device checkCuda(cudaMemcpy(d_dA, h_dA, xyz_bytes, cudaMemcpyHostToDevice)); checkCuda(cudaEventRecord(stop)); checkCuda(cudaEventSynchronize(stop)); checkCuda(cudaEventElapsedTime(&milliseconds, start, stop)); printf("Data %dMB transferred H2D time:%f ms\n", xyz_bytes >> 20, milliseconds); printf("Bandwidth H2D:%f MB/s\n", (float)(xyz_bytes >> 20)/(milliseconds/1000)); checkCuda(cudaMemcpy(d_dB, d_dA, xyz_bytes, cudaMemcpyDeviceToDevice)); // Setup the kernel float* input = d_dA; float* output = d_dB; // modify nx/tx and ny/ty to (nx+tx-1)/tx and (ny+ty-1)/ty // inorder to avoid wrong configuration dim3 grid((nx+tx-1)/tx, (ny+ty-1)/ty); dim3 block(tx, ty); printf("grid:(%d, %d)\n", grid.x, grid.y); printf("block:(%d, %d)\n", tx, ty); float *tmp; float fac = 6.0/(h_dA[0] * h_dA[0]); checkCuda(cudaEventRecord(start)); for(int t = 0; t < timesteps; t += 1) { kernel<<<grid, block>>>(input, output, nx, ny, nz, fac); tmp = input; input = output; output = tmp; } checkCuda(cudaEventRecord(stop)); checkCuda(cudaEventSynchronize(stop)); checkCuda(cudaEventElapsedTime(&milliseconds, start, stop)); // Output the time and GFLOPs of the pure GPU kernel printf("GPU kernel Elapsed Time (pure GPU):%f ms\n", milliseconds); double gflop = (xyz * 1e-9) * 7.0 * timesteps; double gflop_per_sec = gflop * 1e3 / milliseconds; printf("(GPU) %lf GFlop/s\n", gflop_per_sec); double mupdate_per_sec = ((xyz >> 20) * timesteps) * 1e3 / milliseconds; printf("(GPU) %lf M updates/s\n", mupdate_per_sec); // Copy the result to main memory checkCuda(cudaEventRecord(start)); checkCuda(cudaMemcpy(h_dB, input, xyz_bytes, cudaMemcpyDeviceToHost)); checkCuda(cudaEventRecord(stop)); checkCuda(cudaEventSynchronize(stop)); checkCuda(cudaEventElapsedTime(&milliseconds, start, stop)); printf("Data %dMB transferred D2H time:%f ms\n", xyz_bytes >> 20, milliseconds); printf("Bandwidth D2H:%f MB/s\n", (float)(xyz_bytes >> 20)/(milliseconds/1000)); float *gpuResult = h_dB; float *tmp1; for(int t = 0; t < timesteps; t += 1) { jacobi7(nx, ny, nz, h_dA1, h_dB1, fac); tmp1 = h_dA1; h_dA1 = h_dB1; h_dB1 = tmp1; } float *cpuResult = h_dA1; // compare the results btw CPU and GPU version double errorNorm, refNorm, diff; errorNorm = 0.0; refNorm = 0.0; i = 0; for (; i < xyz; ++i){ diff = cpuResult[i] - gpuResult[i]; errorNorm += diff * diff; refNorm += cpuResult[i] * cpuResult[i]; } errorNorm = sqrt(errorNorm); refNorm = sqrt(refNorm); printf("Error Norm:%lf\n", errorNorm); printf("Ref Norm:%lf\n", refNorm); if(abs(refNorm) < 1e-7) { printf("Correctness, FAILED\n"); } else if((errorNorm / refNorm) > 1e-2) { printf("Correctness, FAILED\n"); } else { printf("Correctness, PASSED\n"); } printf("GPU[%d]=%f\n", testIndex, gpuResult[testIndex]); printf("CPU[%d]=%f\n", testIndex, cpuResult[testIndex]); printf("h_dA[%d]=%f\n", testIndex, h_dA[testIndex]); printf("h_dB[%d]=%f\n", 
testIndex, h_dB[testIndex]); printf("h_dA1[%d]=%f\n", testIndex, h_dA1[testIndex]); printf("h_dB1[%d]=%f\n", testIndex, h_dB1[testIndex]); // cleanup checkCuda( cudaEventDestroy(start)); checkCuda( cudaEventDestroy(stop)); cudaFreeHost(h_dA); cudaFreeHost(h_dB); cudaFreeHost(h_dA1); cudaFreeHost(h_dB1); cudaFree(d_dA); cudaFree(d_dB); return 0; }
0388ef638c0cda5699bc214750e127ce97a92e29.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MatMulKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *Md = NULL;
            hipMalloc(&Md, XSIZE*YSIZE);
            float *Nd = NULL;
            hipMalloc(&Nd, XSIZE*YSIZE);
            float *Pd = NULL;
            hipMalloc(&Pd, XSIZE*YSIZE);
            int width = XSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( MatMulKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,width);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( MatMulKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,width);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( MatMulKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, Md,Nd,Pd,width);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
0388ef638c0cda5699bc214750e127ce97a92e29.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MatMulKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *Md = NULL;
            cudaMalloc(&Md, XSIZE*YSIZE);
            float *Nd = NULL;
            cudaMalloc(&Nd, XSIZE*YSIZE);
            float *Pd = NULL;
            cudaMalloc(&Pd, XSIZE*YSIZE);
            int width = XSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            MatMulKernel<<<gridBlock,threadBlock>>>(Md,Nd,Pd,width);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                MatMulKernel<<<gridBlock,threadBlock>>>(Md,Nd,Pd,width);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                MatMulKernel<<<gridBlock,threadBlock>>>(Md,Nd,Pd,width);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
fc817e0e5f673ccc3216860b596722f1688ce02f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kSmallerThanScalar(float* gData, float scalar, float* target, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x) target[i] = gData[i] < scalar; }
fc817e0e5f673ccc3216860b596722f1688ce02f.cu
#include "includes.h" __global__ void kSmallerThanScalar(float* gData, float scalar, float* target, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x) target[i] = gData[i] < scalar; }
8c4475dd6eb27b8342765128a83b88bae156b903.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> int main(int argc, char **argv) { // memory size 128 MBs int isize = 1<<25; int nbytes = isize * sizeof(float); // allocate the host memory //float *h_a = (float *)malloc(nbytes); float *h_a; hipHostMalloc((float **)&h_a, nbytes); // allocate the device memory float *d_a; hipMalloc((float **)&d_a, nbytes); // initialize the host memory for(int i=0;i<isize;i++) h_a[i] = 7; // transfer data from the host to the device hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice); // transfer data from the device to the host hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost); // free memory hipFree(d_a); //free(h_a); hipHostFree(h_a); // reset device hipDeviceReset(); return 0; }
8c4475dd6eb27b8342765128a83b88bae156b903.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> int main(int argc, char **argv) { // memory size 128 MBs int isize = 1<<25; int nbytes = isize * sizeof(float); // allocate the host memory //float *h_a = (float *)malloc(nbytes); float *h_a; cudaMallocHost((float **)&h_a, nbytes); // allocate the device memory float *d_a; cudaMalloc((float **)&d_a, nbytes); // initialize the host memory for(int i=0;i<isize;i++) h_a[i] = 7; // transfer data from the host to the device cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice); // transfer data from the device to the host cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost); // free memory cudaFree(d_a); //free(h_a); cudaFreeHost(h_a); // reset device cudaDeviceReset(); return 0; }
3cf52beb8089bb1b5a487efdca107f2826cd25ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zgerbt_func_batched.cu normal z -> d, Fri Sep 11 18:29:22 2015 @author Adrien Remy @author Azzam Haidar */ #include "common_magma.h" #include "dgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db DOUBLE_PRECISION array, dimension (n) The n vector db computed by DGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_dprbt_mtv_batched( magma_int_t n, double *du, double **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount); hipLaunchKernelGGL(( magmablas_dapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, du, n, db_array, 0); hipLaunchKernelGGL(( magmablas_dapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, du, n+n/2, db_array, n/2); threads = block_length; grid = magma_ceildiv( n, 2*block_length ); hipLaunchKernelGGL(( magmablas_dapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, du, 0, db_array, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db DOUBLE_PRECISION array, dimension (n) The n vector db computed by DGESV_NOPIV_GPU On exit db = dv*db @param[in] dv DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_dprbt_mv_batched( magma_int_t n, double *dv, double **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount); hipLaunchKernelGGL(( magmablas_dapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dv, 0, db_array, 0); threads = block_length; grid = magma_ceildiv( n, 4*block_length ); hipLaunchKernelGGL(( magmablas_dapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dv, n, db_array, 0); hipLaunchKernelGGL(( magmablas_dapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dv, n+n/2, db_array, n/2); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. 
@param[in,out] dA DOUBLE_PRECISION array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). @param[in] du DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_dprbt_batched( magma_int_t n, double **dA_array, magma_int_t ldda, double *du, double *dv, magma_int_t batchCount, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), batchCount ); hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, 0, ldda, du, 0, dv, 0); hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2); hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, n/2, ldda, du, n/2, dv, 0); hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), batchCount ); hipLaunchKernelGGL(( magmablas_delementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue , n, dA_array, 0, ldda, du, -ldda, dv, -ldda); }
3cf52beb8089bb1b5a487efdca107f2826cd25ef.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zgerbt_func_batched.cu normal z -> d, Fri Sep 11 18:29:22 2015 @author Adrien Remy @author Azzam Haidar */ #include "common_magma.h" #include "dgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db DOUBLE_PRECISION array, dimension (n) The n vector db computed by DGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_dprbt_mtv_batched( magma_int_t n, double *du, double **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount); magmablas_dapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, du, n, db_array, 0); magmablas_dapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, du, n+n/2, db_array, n/2); threads = block_length; grid = magma_ceildiv( n, 2*block_length ); magmablas_dapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue >>>(n, du, 0, db_array, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db DOUBLE_PRECISION array, dimension (n) The n vector db computed by DGESV_NOPIV_GPU On exit db = dv*db @param[in] dv DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_dprbt_mv_batched( magma_int_t n, double *dv, double **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount); magmablas_dapply_vector_kernel_batched<<< grid, threads, 0, queue >>>(n, dv, 0, db_array, 0); threads = block_length; grid = magma_ceildiv( n, 4*block_length ); magmablas_dapply_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dv, n, db_array, 0); magmablas_dapply_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dv, n+n/2, db_array, n/2); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. @param[in,out] dA DOUBLE_PRECISION array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). 
@param[in] du DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv DOUBLE_PRECISION array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. ********************************************************************/ extern "C" void magmablas_dprbt_batched( magma_int_t n, double **dA_array, magma_int_t ldda, double *du, double *dv, magma_int_t batchCount, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), batchCount ); magmablas_delementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, 0, ldda, du, 0, dv, 0); magmablas_delementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2); magmablas_delementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, n/2, ldda, du, n/2, dv, 0); magmablas_delementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), batchCount ); magmablas_delementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue >>>(n, dA_array, 0, ldda, du, -ldda, dv, -ldda); }
a20503aad483c285371b851fbb8acd66fb6a115e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_ppcg_inner1_kernel; int xdim0_tea_leaf_ppcg_inner1_kernel_h = -1; __constant__ int xdim1_tea_leaf_ppcg_inner1_kernel; int xdim1_tea_leaf_ppcg_inner1_kernel_h = -1; __constant__ int xdim2_tea_leaf_ppcg_inner1_kernel; int xdim2_tea_leaf_ppcg_inner1_kernel_h = -1; __constant__ int xdim3_tea_leaf_ppcg_inner1_kernel; int xdim3_tea_leaf_ppcg_inner1_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y) (x + xdim0_tea_leaf_ppcg_inner1_kernel * (y)) #define OPS_ACC1(x, y) (x + xdim1_tea_leaf_ppcg_inner1_kernel * (y)) #define OPS_ACC2(x, y) (x + xdim2_tea_leaf_ppcg_inner1_kernel * (y)) #define OPS_ACC3(x, y) (x + xdim3_tea_leaf_ppcg_inner1_kernel * (y)) // user function __device__ void tea_leaf_ppcg_inner1_kernel_gpu(double *rtemp, const double *Kx, const double *Ky, const double *sd, const double *rx, const double *ry) { double smvp = 0.0; smvp = (1.0 + (*ry) * (Ky[OPS_ACC2(0, 1)] + Ky[OPS_ACC2(0, 0)]) + (*rx) * (Kx[OPS_ACC1(1, 0)] + Kx[OPS_ACC1(0, 0)])) * sd[OPS_ACC3(0, 0)] - (*ry) * (Ky[OPS_ACC2(0, 1)] * sd[OPS_ACC3(0, 1)] + Ky[OPS_ACC2(0, 0)] * sd[OPS_ACC3(0, -1)]) - (*rx) * (Kx[OPS_ACC1(1, 0)] * sd[OPS_ACC3(1, 0)] + Kx[OPS_ACC1(0, 0)] * sd[OPS_ACC3(-1, 0)]); rtemp[OPS_ACC0(0, 0)] = rtemp[OPS_ACC0(0, 0)] - smvp; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_tea_leaf_ppcg_inner1_kernel( double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double arg4, const double arg5, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_ppcg_inner1_kernel; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_ppcg_inner1_kernel; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_ppcg_inner1_kernel; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_tea_leaf_ppcg_inner1_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_ppcg_inner1_kernel_gpu(arg0, arg1, arg2, arg3, &arg4, &arg5); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_inner1_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { #else void ops_par_loop_tea_leaf_ppcg_inner1_kernel_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; #endif // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 6, range, 46)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(46, "tea_leaf_ppcg_inner1_kernel"); OPS_kernels[46].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) 
start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; if (xdim0 != xdim0_tea_leaf_ppcg_inner1_kernel_h || xdim1 != xdim1_tea_leaf_ppcg_inner1_kernel_h || xdim2 != xdim2_tea_leaf_ppcg_inner1_kernel_h || xdim3 != xdim3_tea_leaf_ppcg_inner1_kernel_h) { hipMemcpyToSymbol(xdim0_tea_leaf_ppcg_inner1_kernel, &xdim0, sizeof(int)); xdim0_tea_leaf_ppcg_inner1_kernel_h = xdim0; hipMemcpyToSymbol(xdim1_tea_leaf_ppcg_inner1_kernel, &xdim1, sizeof(int)); xdim1_tea_leaf_ppcg_inner1_kernel_h = xdim1; hipMemcpyToSymbol(xdim2_tea_leaf_ppcg_inner1_kernel, &xdim2, sizeof(int)); xdim2_tea_leaf_ppcg_inner1_kernel_h = xdim2; hipMemcpyToSymbol(xdim3_tea_leaf_ppcg_inner1_kernel, &xdim3, sizeof(int)); xdim3_tea_leaf_ppcg_inner1_kernel_h = xdim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[6]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[46].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_tea_leaf_ppcg_inner1_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], *(double *)arg4.data, *(double *)arg5.data, x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[46].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[46].mpi_time += t2 - t1; OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, 
&arg0); OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_inner1_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 46; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 46; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 6; desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; char *tmp = (char *)malloc(1 * sizeof(double)); memcpy(tmp, arg4.data, 1 * sizeof(double)); desc->args[4].data = tmp; desc->args[5] = arg5; tmp = (char *)malloc(1 * sizeof(double)); memcpy(tmp, arg5.data, 1 * sizeof(double)); desc->args[5].data = tmp; desc->function = ops_par_loop_tea_leaf_ppcg_inner1_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(46, "tea_leaf_ppcg_inner1_kernel"); } ops_enqueue_kernel(desc); } #endif
a20503aad483c285371b851fbb8acd66fb6a115e.cu
// // auto-generated by ops.py // __constant__ int xdim0_tea_leaf_ppcg_inner1_kernel; int xdim0_tea_leaf_ppcg_inner1_kernel_h = -1; __constant__ int xdim1_tea_leaf_ppcg_inner1_kernel; int xdim1_tea_leaf_ppcg_inner1_kernel_h = -1; __constant__ int xdim2_tea_leaf_ppcg_inner1_kernel; int xdim2_tea_leaf_ppcg_inner1_kernel_h = -1; __constant__ int xdim3_tea_leaf_ppcg_inner1_kernel; int xdim3_tea_leaf_ppcg_inner1_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x, y) (x + xdim0_tea_leaf_ppcg_inner1_kernel * (y)) #define OPS_ACC1(x, y) (x + xdim1_tea_leaf_ppcg_inner1_kernel * (y)) #define OPS_ACC2(x, y) (x + xdim2_tea_leaf_ppcg_inner1_kernel * (y)) #define OPS_ACC3(x, y) (x + xdim3_tea_leaf_ppcg_inner1_kernel * (y)) // user function __device__ void tea_leaf_ppcg_inner1_kernel_gpu(double *rtemp, const double *Kx, const double *Ky, const double *sd, const double *rx, const double *ry) { double smvp = 0.0; smvp = (1.0 + (*ry) * (Ky[OPS_ACC2(0, 1)] + Ky[OPS_ACC2(0, 0)]) + (*rx) * (Kx[OPS_ACC1(1, 0)] + Kx[OPS_ACC1(0, 0)])) * sd[OPS_ACC3(0, 0)] - (*ry) * (Ky[OPS_ACC2(0, 1)] * sd[OPS_ACC3(0, 1)] + Ky[OPS_ACC2(0, 0)] * sd[OPS_ACC3(0, -1)]) - (*rx) * (Kx[OPS_ACC1(1, 0)] * sd[OPS_ACC3(1, 0)] + Kx[OPS_ACC1(0, 0)] * sd[OPS_ACC3(-1, 0)]); rtemp[OPS_ACC0(0, 0)] = rtemp[OPS_ACC0(0, 0)] - smvp; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_tea_leaf_ppcg_inner1_kernel( double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double arg4, const double arg5, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_ppcg_inner1_kernel; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_ppcg_inner1_kernel; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_ppcg_inner1_kernel; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_tea_leaf_ppcg_inner1_kernel; if (idx_x < size0 && idx_y < size1) { tea_leaf_ppcg_inner1_kernel_gpu(arg0, arg1, arg2, arg3, &arg4, &arg5); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_inner1_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { #else void ops_par_loop_tea_leaf_ppcg_inner1_kernel_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; #endif // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 6, range, 46)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(46, "tea_leaf_ppcg_inner1_kernel"); OPS_kernels[46].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - 
sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; if (xdim0 != xdim0_tea_leaf_ppcg_inner1_kernel_h || xdim1 != xdim1_tea_leaf_ppcg_inner1_kernel_h || xdim2 != xdim2_tea_leaf_ppcg_inner1_kernel_h || xdim3 != xdim3_tea_leaf_ppcg_inner1_kernel_h) { cudaMemcpyToSymbol(xdim0_tea_leaf_ppcg_inner1_kernel, &xdim0, sizeof(int)); xdim0_tea_leaf_ppcg_inner1_kernel_h = xdim0; cudaMemcpyToSymbol(xdim1_tea_leaf_ppcg_inner1_kernel, &xdim1, sizeof(int)); xdim1_tea_leaf_ppcg_inner1_kernel_h = xdim1; cudaMemcpyToSymbol(xdim2_tea_leaf_ppcg_inner1_kernel, &xdim2, sizeof(int)); xdim2_tea_leaf_ppcg_inner1_kernel_h = xdim2; cudaMemcpyToSymbol(xdim3_tea_leaf_ppcg_inner1_kernel, &xdim3, sizeof(int)); xdim3_tea_leaf_ppcg_inner1_kernel_h = xdim3; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[6]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[46].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_tea_leaf_ppcg_inner1_kernel<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], *(double *)arg4.data, *(double *)arg5.data, x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[46].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[46].mpi_time += t2 - t1; OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[46].transfer += 
ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[46].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_inner1_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 46; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 46; for (int i = 0; i < 4; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 6; desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; char *tmp = (char *)malloc(1 * sizeof(double)); memcpy(tmp, arg4.data, 1 * sizeof(double)); desc->args[4].data = tmp; desc->args[5] = arg5; tmp = (char *)malloc(1 * sizeof(double)); memcpy(tmp, arg5.data, 1 * sizeof(double)); desc->args[5].data = tmp; desc->function = ops_par_loop_tea_leaf_ppcg_inner1_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(46, "tea_leaf_ppcg_inner1_kernel"); } ops_enqueue_kernel(desc); } #endif
a20424b41556b189adc3a73cf5794c25f08d05ce.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
        ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 16, 16, false,
        cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
a20424b41556b189adc3a73cf5794c25f08d05ce.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
        ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 16, 16, false,
        cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
fc3472898dd48d26be57a880e706f8f37bf5c55d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> /* memcpy */ #include <math.h> #include <stdint.h> void *cuda_upload_var(void *host_var, int size) { void *cuda_var; hipMalloc(&cuda_var, 4); hipMemcpy(cuda_var, host_var, size, hipMemcpyHostToDevice); return cuda_var; } void cuda_download_var(void *cuda_var, void *host_var, int size) { hipMemcpy(host_var, cuda_var, size, hipMemcpyDeviceToHost); hipFree(cuda_var); } typedef struct intmat2x2 { int m[4]; } intmat2x2; intmat2x2 intmat2x2_mul(intmat2x2 lhs, intmat2x2 rhs) { intmat2x2 ret; ret.m[0] = lhs.m[0]*rhs.m[0] + lhs.m[1]*rhs.m[2]; ret.m[2] = lhs.m[2]*rhs.m[0] + lhs.m[3]*rhs.m[2]; ret.m[1] = lhs.m[0]*rhs.m[1] + lhs.m[1]*rhs.m[3]; ret.m[3] = lhs.m[2]*rhs.m[1] + lhs.m[3]*rhs.m[3]; return ret; } typedef struct floatmat3x3 { float m[9]; } floatmat3x3; floatmat3x3 floatmat3x3_mul(floatmat3x3 lhs, floatmat3x3 rhs) { floatmat3x3 ret; ret.m[0] = lhs.m[0]*rhs.m[0] + lhs.m[1]*rhs.m[3] + lhs.m[2]*rhs.m[6]; ret.m[3] = lhs.m[3]*rhs.m[0] + lhs.m[4]*rhs.m[3] + lhs.m[5]*rhs.m[6]; ret.m[6] = lhs.m[6]*rhs.m[0] + lhs.m[7]*rhs.m[3] + lhs.m[8]*rhs.m[6]; ret.m[1] = lhs.m[0]*rhs.m[1] + lhs.m[1]*rhs.m[4] + lhs.m[2]*rhs.m[7]; ret.m[4] = lhs.m[3]*rhs.m[1] + lhs.m[4]*rhs.m[4] + lhs.m[5]*rhs.m[7]; ret.m[7] = lhs.m[6]*rhs.m[1] + lhs.m[7]*rhs.m[4] + lhs.m[8]*rhs.m[7]; ret.m[2] = lhs.m[0]*rhs.m[2] + lhs.m[1]*rhs.m[5] + lhs.m[2]*rhs.m[8]; ret.m[5] = lhs.m[3]*rhs.m[2] + lhs.m[4]*rhs.m[5] + lhs.m[5]*rhs.m[8]; ret.m[8] = lhs.m[6]*rhs.m[2] + lhs.m[7]*rhs.m[5] + lhs.m[8]*rhs.m[8]; return ret; } int main(int argc, char **argv) { intmat2x2 mat1; intmat2x2 mat2; floatmat3x3 mat3; floatmat3x3 mat4; int i; int k; mat1.m[1*0 + 2*0] = 0; mat1.m[1*1 + 2*0] = 1; mat1.m[1*1 + 2*1] = 2; mat1.m[1*0 + 2*1] = 3; mat2.m[1*0 + 2*0] = 0; mat2.m[1*1 + 2*0] = 1; mat2.m[1*1 + 2*1] = 2; mat2.m[1*0 + 2*1] = 3; for (i = 0; i < 3; i = i + 1) { for (k = 0; k < 3; k = k + 1) { mat3.m[1*i + 3*k] = 1; mat4.m[1*i + 3*k] = 2; } } mat1 = intmat2x2_mul(mat1, mat2); mat3 = floatmat3x3_mul(floatmat3x3_mul(floatmat3x3_mul(mat4, mat3), mat3), mat3); return 0; }
fc3472898dd48d26be57a880e706f8f37bf5c55d.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> /* memcpy */ #include <math.h> #include <stdint.h> void *cuda_upload_var(void *host_var, int size) { void *cuda_var; cudaMalloc(&cuda_var, 4); cudaMemcpy(cuda_var, host_var, size, cudaMemcpyHostToDevice); return cuda_var; } void cuda_download_var(void *cuda_var, void *host_var, int size) { cudaMemcpy(host_var, cuda_var, size, cudaMemcpyDeviceToHost); cudaFree(cuda_var); } typedef struct intmat2x2 { int m[4]; } intmat2x2; intmat2x2 intmat2x2_mul(intmat2x2 lhs, intmat2x2 rhs) { intmat2x2 ret; ret.m[0] = lhs.m[0]*rhs.m[0] + lhs.m[1]*rhs.m[2]; ret.m[2] = lhs.m[2]*rhs.m[0] + lhs.m[3]*rhs.m[2]; ret.m[1] = lhs.m[0]*rhs.m[1] + lhs.m[1]*rhs.m[3]; ret.m[3] = lhs.m[2]*rhs.m[1] + lhs.m[3]*rhs.m[3]; return ret; } typedef struct floatmat3x3 { float m[9]; } floatmat3x3; floatmat3x3 floatmat3x3_mul(floatmat3x3 lhs, floatmat3x3 rhs) { floatmat3x3 ret; ret.m[0] = lhs.m[0]*rhs.m[0] + lhs.m[1]*rhs.m[3] + lhs.m[2]*rhs.m[6]; ret.m[3] = lhs.m[3]*rhs.m[0] + lhs.m[4]*rhs.m[3] + lhs.m[5]*rhs.m[6]; ret.m[6] = lhs.m[6]*rhs.m[0] + lhs.m[7]*rhs.m[3] + lhs.m[8]*rhs.m[6]; ret.m[1] = lhs.m[0]*rhs.m[1] + lhs.m[1]*rhs.m[4] + lhs.m[2]*rhs.m[7]; ret.m[4] = lhs.m[3]*rhs.m[1] + lhs.m[4]*rhs.m[4] + lhs.m[5]*rhs.m[7]; ret.m[7] = lhs.m[6]*rhs.m[1] + lhs.m[7]*rhs.m[4] + lhs.m[8]*rhs.m[7]; ret.m[2] = lhs.m[0]*rhs.m[2] + lhs.m[1]*rhs.m[5] + lhs.m[2]*rhs.m[8]; ret.m[5] = lhs.m[3]*rhs.m[2] + lhs.m[4]*rhs.m[5] + lhs.m[5]*rhs.m[8]; ret.m[8] = lhs.m[6]*rhs.m[2] + lhs.m[7]*rhs.m[5] + lhs.m[8]*rhs.m[8]; return ret; } int main(int argc, char **argv) { intmat2x2 mat1; intmat2x2 mat2; floatmat3x3 mat3; floatmat3x3 mat4; int i; int k; mat1.m[1*0 + 2*0] = 0; mat1.m[1*1 + 2*0] = 1; mat1.m[1*1 + 2*1] = 2; mat1.m[1*0 + 2*1] = 3; mat2.m[1*0 + 2*0] = 0; mat2.m[1*1 + 2*0] = 1; mat2.m[1*1 + 2*1] = 2; mat2.m[1*0 + 2*1] = 3; for (i = 0; i < 3; i = i + 1) { for (k = 0; k < 3; k = k + 1) { mat3.m[1*i + 3*k] = 1; mat4.m[1*i + 3*k] = 2; } } mat1 = intmat2x2_mul(mat1, mat2); mat3 = floatmat3x3_mul(floatmat3x3_mul(floatmat3x3_mul(mat4, mat3), mat3), mat3); return 0; }
788cd286dff75cf7785b79674e5f0fac045f70a9.hip
// !!! This is a file automatically generated by hipify!!! #ifdef _WIN32 #define WINDOWS_LEAN_AND_MEAN #define NOMINMAX #include <windows.h> #endif #include "bucketsort.cuh" #include "context.cuh" #include "helper_cuda.h" #include "helper_timer.h" #include "mergesort.cuh" #include <float.h> #include <gloop/statistics.h> #include <gloop/initialize.cuh> #include <gloop/gloop.h> #include <iostream> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> using namespace std; //////////////////////////////////////////////////////////////////////////////// // Size of the testset (Bitwise shift of 1 over 22 places) //////////////////////////////////////////////////////////////////////////////// // #define SIZE (1 << 22) #define SIZE (1 << 27) //////////////////////////////////////////////////////////////////////////////// // Number of tests to average over //////////////////////////////////////////////////////////////////////////////// #define TEST 4 //////////////////////////////////////////////////////////////////////////////// // The timers for the different parts of the algo //////////////////////////////////////////////////////////////////////////////// StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer, *mergeTimer, *totalTimer, *cpuTimer; //////////////////////////////////////////////////////////////////////////////// // Compare method for CPU sort //////////////////////////////////////////////////////////////////////////////// inline int compare(const void* a, const void* b) { if (*((float*)a) < *((float*)b)) return -1; else if (*((float*)a) > *((float*)b)) return 1; else return 0; } //////////////////////////////////////////////////////////////////////////////// // Forward declaration //////////////////////////////////////////////////////////////////////////////// void cudaSort(Context* context, float* origList, float minimum, float maximum, float* resultList, int numElements); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { { gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope; // Create timers for each sort sdkCreateTimer(&uploadTimer); sdkCreateTimer(&downloadTimer); sdkCreateTimer(&bucketTimer); sdkCreateTimer(&mergeTimer); sdkCreateTimer(&totalTimer); sdkCreateTimer(&cpuTimer); int numElements = 0; // Number of elements in the test bed if (strcmp(argv[1], "r") == 0) { numElements = SIZE; } else { gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope; FILE* fp; fp = fopen(argv[1], "r"); if (fp == NULL) { cout << "Error reading file" << endl; exit(EXIT_FAILURE); } int count = 0; float c; while (fscanf(fp, "%f", &c) != EOF) { count++; } fclose(fp); numElements = count; } cout << "Sorting list of " << numElements << " floats\n"; // Generate random data // Memory space the list of random floats will take up int mem_size = numElements * sizeof(float); // Allocate enough for the input list float* cpu_idata = (float*)malloc(mem_size); // Allocate enough for the output list on the cpu side float* cpu_odata = (float*)malloc(mem_size); // Allocate enough memory for the output list on the gpu side float* gpu_odata = (float*)malloc(mem_size); float datamin = FLT_MAX; float datamax = -FLT_MAX; if (strcmp(argv[1], "r") == 0) { for (int i = 0; i < numElements; i++) { // Generate random floats between 0 and 1 for the input data cpu_idata[i] = ((float)rand() / RAND_MAX); //Compare data at 
index to data minimum, if less than current minimum, set that element as new minimum datamin = min(cpu_idata[i], datamin); //Same as above but for maximum datamax = max(cpu_idata[i], datamax); } } else { gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope; FILE* fp; fp = fopen(argv[1], "r"); for (int i = 0; i < numElements; i++) { fscanf(fp, "%f", &cpu_idata[i]); datamin = min(cpu_idata[i], datamin); datamax = max(cpu_idata[i], datamax); } } { gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope; gloop::eagerlyInitializeContext(); } cout << "Sorting on GPU..." << flush; { gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope; Context context{ nullptr, nullptr, }; CUDA_SAFE_CALL(hipMalloc(&context.device, sizeof(DeviceContext))); CUDA_SAFE_CALL(hipHostMalloc(&context.continuing, sizeof(int), hipHostMallocMapped)); *context.continuing = 0; CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize, 4 << 30)); { gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope; // GPU Sort for (int i = 0; i < TEST; i++) cudaSort(&context, cpu_idata, datamin, datamax, gpu_odata, numElements); cout << "done.\n"; } } { gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope; gloop::eagerlyFinalizeContext(); } #ifdef VERIFY cout << "Sorting on CPU..." << flush; // CPU Sort memcpy(cpu_odata, cpu_idata, mem_size); sdkStartTimer(&cpuTimer); qsort(cpu_odata, numElements, sizeof(float), compare); sdkStopTimer(&cpuTimer); cout << "done.\n"; cout << "Checking result..." << flush; // Result checking int count = 0; for (int i = 0; i < numElements; i++) if (cpu_odata[i] != gpu_odata[i]) { printf("Sort missmatch on element %d: \n", i); printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]); count++; break; } if (count == 0) cout << "PASSED.\n"; else cout << "FAILED.\n"; #endif // Timer report printf("GPU iterations: %d\n", TEST); #ifdef TIMER #ifdef VERIFY printf("Average CPU execution time: %f ms\n", sdkGetTimerValue(&cpuTimer)); #endif printf("Average GPU execution time: %f ms\n", sdkGetTimerValue(&totalTimer) / TEST); printf(" - Upload : %f ms\n", sdkGetTimerValue(&uploadTimer) / TEST); printf(" - Download : %f ms\n", sdkGetTimerValue(&downloadTimer) / TEST); printf(" - Bucket sort : %f ms\n", sdkGetTimerValue(&bucketTimer) / TEST); printf(" - Merge sort : %f ms\n", sdkGetTimerValue(&mergeTimer) / TEST); #endif #ifdef OUTPUT FILE* tp; const char filename2[] = "./hybridoutput.txt"; tp = fopen(filename2, "w"); for (int i = 0; i < numElements; i++) { fprintf(tp, "%f ", cpu_idata[i]); } fclose(tp); #endif // Release memory sdkDeleteTimer(&uploadTimer); sdkDeleteTimer(&downloadTimer); sdkDeleteTimer(&bucketTimer); sdkDeleteTimer(&mergeTimer); sdkDeleteTimer(&totalTimer); sdkDeleteTimer(&cpuTimer); free(cpu_idata); free(cpu_odata); free(gpu_odata); } gloop::Statistics::instance().report(stderr); } void cudaSort(Context* ctx, float* origList, float minimum, float maximum, float* resultList, int numElements) { gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope; // Initialization and upload data float* d_input = NULL; float* d_output = NULL; int mem_size = (numElements + DIVISIONS * 4) * sizeof(float); sdkStartTimer(&uploadTimer); { gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope; hipMalloc((void**)&d_input, mem_size); hipMalloc((void**)&d_output, mem_size); hipMemcpy((void*)d_input, (void*)origList, numElements * sizeof(float), hipMemcpyHostToDevice); init_bucketsort(numElements); } sdkStopTimer(&uploadTimer); sdkStartTimer(&totalTimer); // 
Bucketsort the list sdkStartTimer(&bucketTimer); int* sizes = (int*)malloc(DIVISIONS * sizeof(int)); int* nullElements = (int*)malloc(DIVISIONS * sizeof(int)); unsigned int* origOffsets = (unsigned int*)malloc((DIVISIONS + 1) * sizeof(int)); bucketSort(ctx, d_input, d_output, numElements, sizes, nullElements, minimum, maximum, origOffsets); sdkStopTimer(&bucketTimer); // Mergesort the result sdkStartTimer(&mergeTimer); float4 *d_origList = (float4 *)d_output, *d_resultList = (float4 *)d_input; int newlistsize = 0; for (int i = 0; i < DIVISIONS; i++) newlistsize += sizes[i] * 4; float4* mergeresult = runMergeSort(ctx, newlistsize, DIVISIONS, d_origList, d_resultList, sizes, nullElements, origOffsets); //d_origList; // hipDeviceSynchronize(); sdkStopTimer(&mergeTimer); sdkStopTimer(&totalTimer); // Download result sdkStartTimer(&downloadTimer); { gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope; checkCudaErrors(hipMemcpy((void*)resultList, (void*)mergeresult, numElements * sizeof(float), hipMemcpyDeviceToHost)); } sdkStopTimer(&downloadTimer); // Clean up { finish_bucketsort(); { gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope; hipFree(d_input); hipFree(d_output); } } free(nullElements); free(sizes); }
788cd286dff75cf7785b79674e5f0fac045f70a9.cu
#ifdef _WIN32 #define WINDOWS_LEAN_AND_MEAN #define NOMINMAX #include <windows.h> #endif #include "bucketsort.cuh" #include "context.cuh" #include "helper_cuda.h" #include "helper_timer.h" #include "mergesort.cuh" #include <float.h> #include <gloop/statistics.h> #include <gloop/initialize.cuh> #include <gloop/gloop.h> #include <iostream> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> using namespace std; //////////////////////////////////////////////////////////////////////////////// // Size of the testset (Bitwise shift of 1 over 22 places) //////////////////////////////////////////////////////////////////////////////// // #define SIZE (1 << 22) #define SIZE (1 << 27) //////////////////////////////////////////////////////////////////////////////// // Number of tests to average over //////////////////////////////////////////////////////////////////////////////// #define TEST 4 //////////////////////////////////////////////////////////////////////////////// // The timers for the different parts of the algo //////////////////////////////////////////////////////////////////////////////// StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer, *mergeTimer, *totalTimer, *cpuTimer; //////////////////////////////////////////////////////////////////////////////// // Compare method for CPU sort //////////////////////////////////////////////////////////////////////////////// inline int compare(const void* a, const void* b) { if (*((float*)a) < *((float*)b)) return -1; else if (*((float*)a) > *((float*)b)) return 1; else return 0; } //////////////////////////////////////////////////////////////////////////////// // Forward declaration //////////////////////////////////////////////////////////////////////////////// void cudaSort(Context* context, float* origList, float minimum, float maximum, float* resultList, int numElements); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { { gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope; // Create timers for each sort sdkCreateTimer(&uploadTimer); sdkCreateTimer(&downloadTimer); sdkCreateTimer(&bucketTimer); sdkCreateTimer(&mergeTimer); sdkCreateTimer(&totalTimer); sdkCreateTimer(&cpuTimer); int numElements = 0; // Number of elements in the test bed if (strcmp(argv[1], "r") == 0) { numElements = SIZE; } else { gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope; FILE* fp; fp = fopen(argv[1], "r"); if (fp == NULL) { cout << "Error reading file" << endl; exit(EXIT_FAILURE); } int count = 0; float c; while (fscanf(fp, "%f", &c) != EOF) { count++; } fclose(fp); numElements = count; } cout << "Sorting list of " << numElements << " floats\n"; // Generate random data // Memory space the list of random floats will take up int mem_size = numElements * sizeof(float); // Allocate enough for the input list float* cpu_idata = (float*)malloc(mem_size); // Allocate enough for the output list on the cpu side float* cpu_odata = (float*)malloc(mem_size); // Allocate enough memory for the output list on the gpu side float* gpu_odata = (float*)malloc(mem_size); float datamin = FLT_MAX; float datamax = -FLT_MAX; if (strcmp(argv[1], "r") == 0) { for (int i = 0; i < numElements; i++) { // Generate random floats between 0 and 1 for the input data cpu_idata[i] = ((float)rand() / RAND_MAX); //Compare data at index to data minimum, if less than current minimum, set 
that element as new minimum datamin = min(cpu_idata[i], datamin); //Same as above but for maximum datamax = max(cpu_idata[i], datamax); } } else { gloop::Statistics::Scope<gloop::Statistics::Type::IO> scope; FILE* fp; fp = fopen(argv[1], "r"); for (int i = 0; i < numElements; i++) { fscanf(fp, "%f", &cpu_idata[i]); datamin = min(cpu_idata[i], datamin); datamax = max(cpu_idata[i], datamax); } } { gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope; gloop::eagerlyInitializeContext(); } cout << "Sorting on GPU..." << flush; { gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope; Context context{ nullptr, nullptr, }; CUDA_SAFE_CALL(cudaMalloc(&context.device, sizeof(DeviceContext))); CUDA_SAFE_CALL(cudaHostAlloc(&context.continuing, sizeof(int), cudaHostAllocMapped)); *context.continuing = 0; CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4 << 30)); { gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope; // GPU Sort for (int i = 0; i < TEST; i++) cudaSort(&context, cpu_idata, datamin, datamax, gpu_odata, numElements); cout << "done.\n"; } } { gloop::Statistics::Scope<gloop::Statistics::Type::GPUInit> scope; gloop::eagerlyFinalizeContext(); } #ifdef VERIFY cout << "Sorting on CPU..." << flush; // CPU Sort memcpy(cpu_odata, cpu_idata, mem_size); sdkStartTimer(&cpuTimer); qsort(cpu_odata, numElements, sizeof(float), compare); sdkStopTimer(&cpuTimer); cout << "done.\n"; cout << "Checking result..." << flush; // Result checking int count = 0; for (int i = 0; i < numElements; i++) if (cpu_odata[i] != gpu_odata[i]) { printf("Sort missmatch on element %d: \n", i); printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]); count++; break; } if (count == 0) cout << "PASSED.\n"; else cout << "FAILED.\n"; #endif // Timer report printf("GPU iterations: %d\n", TEST); #ifdef TIMER #ifdef VERIFY printf("Average CPU execution time: %f ms\n", sdkGetTimerValue(&cpuTimer)); #endif printf("Average GPU execution time: %f ms\n", sdkGetTimerValue(&totalTimer) / TEST); printf(" - Upload : %f ms\n", sdkGetTimerValue(&uploadTimer) / TEST); printf(" - Download : %f ms\n", sdkGetTimerValue(&downloadTimer) / TEST); printf(" - Bucket sort : %f ms\n", sdkGetTimerValue(&bucketTimer) / TEST); printf(" - Merge sort : %f ms\n", sdkGetTimerValue(&mergeTimer) / TEST); #endif #ifdef OUTPUT FILE* tp; const char filename2[] = "./hybridoutput.txt"; tp = fopen(filename2, "w"); for (int i = 0; i < numElements; i++) { fprintf(tp, "%f ", cpu_idata[i]); } fclose(tp); #endif // Release memory sdkDeleteTimer(&uploadTimer); sdkDeleteTimer(&downloadTimer); sdkDeleteTimer(&bucketTimer); sdkDeleteTimer(&mergeTimer); sdkDeleteTimer(&totalTimer); sdkDeleteTimer(&cpuTimer); free(cpu_idata); free(cpu_odata); free(gpu_odata); } gloop::Statistics::instance().report(stderr); } void cudaSort(Context* ctx, float* origList, float minimum, float maximum, float* resultList, int numElements) { gloop::Statistics::Scope<gloop::Statistics::Type::DataInit> scope; // Initialization and upload data float* d_input = NULL; float* d_output = NULL; int mem_size = (numElements + DIVISIONS * 4) * sizeof(float); sdkStartTimer(&uploadTimer); { gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope; cudaMalloc((void**)&d_input, mem_size); cudaMalloc((void**)&d_output, mem_size); cudaMemcpy((void*)d_input, (void*)origList, numElements * sizeof(float), cudaMemcpyHostToDevice); init_bucketsort(numElements); } sdkStopTimer(&uploadTimer); sdkStartTimer(&totalTimer); // Bucketsort the list sdkStartTimer(&bucketTimer); 
int* sizes = (int*)malloc(DIVISIONS * sizeof(int)); int* nullElements = (int*)malloc(DIVISIONS * sizeof(int)); unsigned int* origOffsets = (unsigned int*)malloc((DIVISIONS + 1) * sizeof(int)); bucketSort(ctx, d_input, d_output, numElements, sizes, nullElements, minimum, maximum, origOffsets); sdkStopTimer(&bucketTimer); // Mergesort the result sdkStartTimer(&mergeTimer); float4 *d_origList = (float4 *)d_output, *d_resultList = (float4 *)d_input; int newlistsize = 0; for (int i = 0; i < DIVISIONS; i++) newlistsize += sizes[i] * 4; float4* mergeresult = runMergeSort(ctx, newlistsize, DIVISIONS, d_origList, d_resultList, sizes, nullElements, origOffsets); //d_origList; // cudaThreadSynchronize(); sdkStopTimer(&mergeTimer); sdkStopTimer(&totalTimer); // Download result sdkStartTimer(&downloadTimer); { gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope; checkCudaErrors(cudaMemcpy((void*)resultList, (void*)mergeresult, numElements * sizeof(float), cudaMemcpyDeviceToHost)); } sdkStopTimer(&downloadTimer); // Clean up { finish_bucketsort(); { gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope; cudaFree(d_input); cudaFree(d_output); } } free(nullElements); free(sizes); }
5c9b2954d8fcf0408e9222e0bffea77ba7039aba.hip
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <device_launch_parameters.h>
#include <hip/hip_runtime_api.h>

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to an array of 0s and 1s for stream compaction. Elements
         * which map to 0 will be removed, and elements which map to 1 will be kept.
         */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            // TODO
            int index = threadIdx.x + (blockDim.x * blockIdx.x);
            if (index >= n)
                return;
            if (idata[index] == 0)
                bools[index] = 0;
            else
                bools[index] = 1;
        }

        /**
         * Performs scatter on an array. That is, for each element in idata,
         * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
         */
        __global__ void kernScatter(int n, int *odata, const int *idata,
                const int *bools, const int *indices) {
            // TODO
            int index = threadIdx.x + (blockDim.x * blockIdx.x);
            if (index >= n)
                return;
            if (bools[index])
                odata[indices[index]] = idata[index];
        }

    }
}
5c9b2954d8fcf0408e9222e0bffea77ba7039aba.cu
#include "common.h" #include <device_launch_parameters.h> #include <cuda_runtime_api.h> void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = threadIdx.x + (blockDim.x * blockIdx.x); if (index >= n) return; if (idata[index] == 0) bools[index] = 0; else bools[index] = 1; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = threadIdx.x + (blockDim.x * blockIdx.x); if (index >= n) return; if (bools[index]) odata[indices[index]] = idata[index]; } } }
273a8cd9f8cce17bad046104073c9c8e9bba2aa4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>

#define TILE_WIDTH 2

__global__ void MatrixMulKernel(float *d_M , float *d_N , float *d_P , int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Identify the row and column of the d_P element to work on
    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;
    float Pvalue = 0;

    // Loop over the d_M and d_N tiles required to compute d_P element
    // ph indicates the phase number
    for (int ph = 0; ph < ceil(Width/(float)TILE_WIDTH); ++ph) {
        // Collaborative loading of d_M and d_N tiles into shared memory
        if ((Row < Width) && ((ph*TILE_WIDTH + tx) < Width))
            Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
        else
            Mds[ty][tx] = 0.0;
        if (((ph*TILE_WIDTH + ty) < Width) && (Col < Width))
            Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col];
        else
            Nds[ty][tx] = 0.0;
        __syncthreads(); // for synchronizing the threads

        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads(); // for synchronizing the threads
    }
    if ((Row < Width) && (Col < Width))
        d_P[Row*Width + Col] = Pvalue;
}

int main()
{
    const int Width = 6;
    float h_Ma[Width][Width], h_Mb[Width][Width], h_Mc[Width][Width];
    float *d_Ma, *d_Mb, *d_Mc; // device array
    hipEvent_t start, stop;
    float elapsedTime;
    int i, j;

    /*h_Ma = (float**) malloc((Width)*sizeof(float));
    h_Mb = (float**) malloc((Width)*sizeof(float));
    h_Mc = (float**) malloc((Width)*sizeof(float));
    for (i = 0; i < Width; i++) {
        h_Ma[i] = (float*) malloc((Width)*sizeof(float));
        h_Mb[i] = (float*) malloc((Width)*sizeof(float));
        h_Mc[i] = (float*) malloc((Width)*sizeof(float));
    }*/

    //input in host array
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            h_Ma[i][j] = 1;
            h_Mb[i][j] = 2;
        }
    }
    int size = (Width*Width)*sizeof(float);

    //create device array: hipMalloc((void **)&array_name, sizeofmatrixinbytes);
    hipMalloc((void **) &d_Ma, size);
    hipMalloc((void **) &d_Mb, size);

    //copy host array to device array: hipMemcpy(dest, source, WIDTH, direction)
    hipMemcpy(d_Ma, h_Ma, size, hipMemcpyHostToDevice);
    hipMemcpy(d_Mb, h_Mb, size, hipMemcpyHostToDevice);

    //allocating memory for the resultant device array
    hipMalloc((void **) &d_Mc, size);

    //calling kernel
    dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    hipEventCreate(&start);
    hipEventRecord(start,0);

    hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Ma, d_Mb, d_Mc, Width);

    hipEventCreate(&stop);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop); // block the host until the kernel has finished

    //copy back result_array_d to result_array_h
    hipMemcpy(h_Mc, d_Mc, size, hipMemcpyDeviceToHost);

    //print the result array
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            printf("%f ", h_Mc[i][j]);
        }
        printf("\n");
    }

    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed time : %f ms\n", elapsedTime);

    hipFree(d_Ma);
    hipFree(d_Mb);
    hipFree(d_Mc);

    //system("pause") ;
    return 0;
}
273a8cd9f8cce17bad046104073c9c8e9bba2aa4.cu
#include <stdio.h>
#include <math.h>

#define TILE_WIDTH 2

__global__ void MatrixMulKernel(float *d_M , float *d_N , float *d_P , int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Identify the row and column of the d_P element to work on
    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;
    float Pvalue = 0;

    // Loop over the d_M and d_N tiles required to compute d_P element
    // ph indicates the phase number
    for (int ph = 0; ph < ceil(Width/(float)TILE_WIDTH); ++ph) {
        // Collaborative loading of d_M and d_N tiles into shared memory
        if ((Row < Width) && ((ph*TILE_WIDTH + tx) < Width))
            Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
        else
            Mds[ty][tx] = 0.0;
        if (((ph*TILE_WIDTH + ty) < Width) && (Col < Width))
            Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col];
        else
            Nds[ty][tx] = 0.0;
        __syncthreads(); // for synchronizing the threads

        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads(); // for synchronizing the threads
    }
    if ((Row < Width) && (Col < Width))
        d_P[Row*Width + Col] = Pvalue;
}

int main()
{
    const int Width = 6;
    float h_Ma[Width][Width], h_Mb[Width][Width], h_Mc[Width][Width];
    float *d_Ma, *d_Mb, *d_Mc; // device array
    cudaEvent_t start, stop;
    float elapsedTime;
    int i, j;

    /*h_Ma = (float**) malloc((Width)*sizeof(float));
    h_Mb = (float**) malloc((Width)*sizeof(float));
    h_Mc = (float**) malloc((Width)*sizeof(float));
    for (i = 0; i < Width; i++) {
        h_Ma[i] = (float*) malloc((Width)*sizeof(float));
        h_Mb[i] = (float*) malloc((Width)*sizeof(float));
        h_Mc[i] = (float*) malloc((Width)*sizeof(float));
    }*/

    //input in host array
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            h_Ma[i][j] = 1;
            h_Mb[i][j] = 2;
        }
    }
    int size = (Width*Width)*sizeof(float);

    //create device array: cudaMalloc((void **)&array_name, sizeofmatrixinbytes);
    cudaMalloc((void **) &d_Ma, size);
    cudaMalloc((void **) &d_Mb, size);

    //copy host array to device array: cudaMemcpy(dest, source, WIDTH, direction)
    cudaMemcpy(d_Ma, h_Ma, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Mb, h_Mb, size, cudaMemcpyHostToDevice);

    //allocating memory for the resultant device array
    cudaMalloc((void **) &d_Mc, size);

    //calling kernel
    dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    cudaEventCreate(&start);
    cudaEventRecord(start,0);

    MatrixMulKernel<<<dimGrid,dimBlock>>>(d_Ma, d_Mb, d_Mc, Width);

    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop); // block the host until the kernel has finished

    //copy back result_array_d to result_array_h
    cudaMemcpy(h_Mc, d_Mc, size, cudaMemcpyDeviceToHost);

    //print the result array
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            printf("%f ", h_Mc[i][j]);
        }
        printf("\n");
    }

    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed time : %f ms\n", elapsedTime);

    cudaFree(d_Ma);
    cudaFree(d_Mb);
    cudaFree(d_Mc);

    //system("pause") ;
    return 0;
}
245e4ec1d132f9842b8dd10a93785c84a7eb31d5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// to compile on cooley: nvcc -arch sm_30 -o mandelbrot mandelbrot.cu -lm
// to run on cooley: ./mandelbrot

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define MXITER 1000
#define NPOINTS 2048

// we will use these later to specify a 16x16 thread-block size
#define TX 16
#define TY 16

typedef struct {
  double r;
  double i;
} d_complex;

// return 1 if c is outside the mandelbrot set
// return 0 if c is inside the mandelbrot set
// TASK 1: annotate this as a device function
__device__ int testpoint(d_complex c){
  d_complex z = c;

  for(int iter=0; iter<MXITER; iter++){
    double temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){
      return 1;
    }
  }

  return 0;
}

// FREEBIE: partial reduction
__device__ void partialReduction(int outside, int *outsideCounts){
  __shared__ int s_outside[TX*TY];

  int t = threadIdx.x + threadIdx.y*TX;
  s_outside[t] = outside;

  int alive = TX*TY;
  while(alive>1){
    __syncthreads();
    alive /= 2;
    if(t<alive && t+alive<TX*TY)
      s_outside[t] += s_outside[t+alive];
  }

  if(t==0){
    int b = blockIdx.x + gridDim.x*blockIdx.y;
    outsideCounts[b] = s_outside[0];
  }
}

// TASK 2: make this a kernel that processes
//   (i,j) \in [blockIdx.x*blockDim.x,(blockIdx.x+1)*blockDim.x)
//           x [blockIdx.y*blockDim.y,(blockIdx.y+1)*blockDim.y)
// TASK 2a: annotate this to indicate it is a kernel and change return type to void
__global__ void mandeloutside(int * outsideCounts){
  double eps = 1e-5;
  d_complex c;

  // TASK 2b: replace loop structures with (i,j) defined from blockIdx, blockDim, threadIdx
  //  for(i=0;i<NPOINTS;i++){
  //    for(j=0;j<NPOINTS;j++){
  int i = threadIdx.x+blockIdx.x*TX;
  int j = threadIdx.y+blockIdx.y*TY;

  c.r = -2. + 2.5*((double)i)/(double)(NPOINTS)+eps;
  c.i = 1.125*((double)j)/(double)(NPOINTS)+eps;

  // TASK 2c: replace this with a partial sum reduction of numoutside in thread block
  int outside = 0;
  if(i<NPOINTS && j<NPOINTS){
    outside = testpoint(c);
  }
  //    }
  //  }

  // FREEBIE: reduction of TX*TY values to one value on each thread-block
  partialReduction(outside, outsideCounts);
}

int main(int argc, char **argv){

  // TASK 3a: define dim3 variables for the grid size and thread-block size
  int GX = (NPOINTS+TX-1)/TX;
  int GY = (NPOINTS+TY-1)/TY;

  dim3 dimGrid(GX,GY,1);
  dim3 dimBlock(TX,TY,1);

  // TASK 3b: use hipMalloc to create a DEVICE array that has one entry for each thread-block
  int *c_outsideCounts;
  hipMalloc(&c_outsideCounts, GX*GY*sizeof(int));

  // FREEBIE: create CUDA events for timing
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);

  hipEventRecord(start);

  // TASK 3c: replace this with a kernel call
  hipLaunchKernelGGL(mandeloutside, dim3(dimGrid), dim3(dimBlock), 0, 0, c_outsideCounts);

  // FREEBIE: timing
  float elapsed;
  hipEventRecord(end);
  hipEventSynchronize(end);
  hipEventElapsedTime(&elapsed, start, end);
  elapsed /= 1000;
  printf("elapsed = %g\n", elapsed);

  // TASK 3d: allocate a HOST array to receive the contents of the c_outsideCounts array
  int *h_outsideCounts = (int*) calloc(GX*GY, sizeof(int));

  // TASK 3e: use hipMemcpy to copy the contents of the entries of c_outsideCounts to h_outsideCounts
  hipMemcpy(h_outsideCounts, c_outsideCounts, GX*GY*sizeof(int), hipMemcpyDeviceToHost);

  // TASK 3f: sum up the outsideCounts
  int numoutside = 0;
  for(int n=0;n<GX*GY;++n){
    numoutside += h_outsideCounts[n];
  }

  printf("numoutside = %d\n", numoutside);

  double area = 2.*2.5*1.125*(NPOINTS*NPOINTS-numoutside)/(NPOINTS*NPOINTS);

  printf("area = %17.15lf\n", area);

  return 0;
}
245e4ec1d132f9842b8dd10a93785c84a7eb31d5.cu
// to compile on cooley: nvcc -arch sm_30 -o mandelbrot mandelbrot.cu -lm
// to run on cooley: ./mandelbrot

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define MXITER 1000
#define NPOINTS 2048

// we will use these later to specify a 16x16 thread-block size
#define TX 16
#define TY 16

typedef struct {
  double r;
  double i;
} d_complex;

// return 1 if c is outside the mandelbrot set
// return 0 if c is inside the mandelbrot set
// TASK 1: annotate this as a device function
__device__ int testpoint(d_complex c){
  d_complex z = c;

  for(int iter=0; iter<MXITER; iter++){
    double temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){
      return 1;
    }
  }

  return 0;
}

// FREEBIE: partial reduction
__device__ void partialReduction(int outside, int *outsideCounts){
  __shared__ int s_outside[TX*TY];

  int t = threadIdx.x + threadIdx.y*TX;
  s_outside[t] = outside;

  int alive = TX*TY;
  while(alive>1){
    __syncthreads();
    alive /= 2;
    if(t<alive && t+alive<TX*TY)
      s_outside[t] += s_outside[t+alive];
  }

  if(t==0){
    int b = blockIdx.x + gridDim.x*blockIdx.y;
    outsideCounts[b] = s_outside[0];
  }
}

// TASK 2: make this a kernel that processes
//   (i,j) \in [blockIdx.x*blockDim.x,(blockIdx.x+1)*blockDim.x)
//           x [blockIdx.y*blockDim.y,(blockIdx.y+1)*blockDim.y)
// TASK 2a: annotate this to indicate it is a kernel and change return type to void
__global__ void mandeloutside(int * outsideCounts){
  double eps = 1e-5;
  d_complex c;

  // TASK 2b: replace loop structures with (i,j) defined from blockIdx, blockDim, threadIdx
  //  for(i=0;i<NPOINTS;i++){
  //    for(j=0;j<NPOINTS;j++){
  int i = threadIdx.x+blockIdx.x*TX;
  int j = threadIdx.y+blockIdx.y*TY;

  c.r = -2. + 2.5*((double)i)/(double)(NPOINTS)+eps;
  c.i = 1.125*((double)j)/(double)(NPOINTS)+eps;

  // TASK 2c: replace this with a partial sum reduction of numoutside in thread block
  int outside = 0;
  if(i<NPOINTS && j<NPOINTS){
    outside = testpoint(c);
  }
  //    }
  //  }

  // FREEBIE: reduction of TX*TY values to one value on each thread-block
  partialReduction(outside, outsideCounts);
}

int main(int argc, char **argv){

  // TASK 3a: define dim3 variables for the grid size and thread-block size
  int GX = (NPOINTS+TX-1)/TX;
  int GY = (NPOINTS+TY-1)/TY;

  dim3 dimGrid(GX,GY,1);
  dim3 dimBlock(TX,TY,1);

  // TASK 3b: use cudaMalloc to create a DEVICE array that has one entry for each thread-block
  int *c_outsideCounts;
  cudaMalloc(&c_outsideCounts, GX*GY*sizeof(int));

  // FREEBIE: create CUDA events for timing
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  cudaEventRecord(start);

  // TASK 3c: replace this with a kernel call
  mandeloutside <<< dimGrid, dimBlock >>> (c_outsideCounts);

  // FREEBIE: timing
  float elapsed;
  cudaEventRecord(end);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&elapsed, start, end);
  elapsed /= 1000;
  printf("elapsed = %g\n", elapsed);

  // TASK 3d: allocate a HOST array to receive the contents of the c_outsideCounts array
  int *h_outsideCounts = (int*) calloc(GX*GY, sizeof(int));

  // TASK 3e: use cudaMemcpy to copy the contents of the entries of c_outsideCounts to h_outsideCounts
  cudaMemcpy(h_outsideCounts, c_outsideCounts, GX*GY*sizeof(int), cudaMemcpyDeviceToHost);

  // TASK 3f: sum up the outsideCounts
  int numoutside = 0;
  for(int n=0;n<GX*GY;++n){
    numoutside += h_outsideCounts[n];
  }

  printf("numoutside = %d\n", numoutside);

  double area = 2.*2.5*1.125*(NPOINTS*NPOINTS-numoutside)/(NPOINTS*NPOINTS);

  printf("area = %17.15lf\n", area);

  return 0;
}
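A serial host reference can sanity-check the GPU area estimate. This is only a sketch: hostTestpoint and hostArea are hypothetical helpers that assume the same MXITER, NPOINTS, and sampling formula as the kernels above, and at NPOINTS = 2048 the loop is slow, so a reduced sample count may be preferable for quick checks.

// Hedged sketch: serial host reference for the Mandelbrot area estimate.
// hostTestpoint/hostArea are hypothetical names mirroring the device code above.
static int hostTestpoint(double cr, double ci)
{
    double zr = cr, zi = ci;
    for (int iter = 0; iter < MXITER; iter++) {
        double temp = zr * zr - zi * zi + cr;
        zi = 2. * zr * zi + ci;
        zr = temp;
        if (zr * zr + zi * zi > 4.0) return 1; // escaped: outside the set
    }
    return 0;
}

static double hostArea(void)
{
    const double eps = 1e-5;
    long numoutside = 0;
    for (int i = 0; i < NPOINTS; i++) {
        for (int j = 0; j < NPOINTS; j++) {
            double cr = -2. + 2.5 * (double)i / (double)NPOINTS + eps;
            double ci = 1.125 * (double)j / (double)NPOINTS + eps;
            numoutside += hostTestpoint(cr, ci);
        }
    }
    return 2. * 2.5 * 1.125 * ((double)NPOINTS * NPOINTS - numoutside)
           / ((double)NPOINTS * NPOINTS);
}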
462eda920bbcf79e4e33ab538ee747198192a7ab.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2009-2020 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2020, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>

// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *b, int *c, int n)
{
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = a[id] + b[id];
}

int main( int argc, char* argv[] )
{
    // Size of vectors
    int n = 131072;

    // Host input vectors
    int *h_a;
    int *h_b;
    printf("init point h_a %p\n",h_a);
    // Host output vector
    int *h_c;

    // Device input vectors
    int *d_a;
    printf("init point d_a %p\n",d_a);
    int *d_b;
    // Device output vector
    int *d_c;

    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(int);

    // Allocate memory for each vector on host
    h_a = (int*)malloc(bytes);
    printf("malloc point h_a %p\n",h_a);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);

    // Allocate memory for each vector on GPU
    hipMalloc(&d_a, bytes);
    printf("cuda malloc point d_a %p\n",d_a);
    hipMalloc(&d_b, bytes);
    hipMalloc(&d_c, bytes);

    int i;
    // Initialize vectors on host
    for( i = 0; i < n; i++ ) {
        h_a[i] = 3;
        h_b[i] = 4;
    }

    // Copy host vectors to device
    printf("pre cpy point h_a %p\n",h_a);
    printf("pre cpy point d_a %p\n",d_a);
    hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);

    int blockSize, gridSize;

    // Number of threads in each thread block
    blockSize = 256;

    // Number of thread blocks in grid
    gridSize = (int)ceil((float)n/blockSize);

    // Execute the kernel
    hipLaunchKernelGGL(vecAdd, dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);

    // Copy array back to host
    hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );

    // Sum up vector c and print the result divided by n; every element is 3 + 4,
    // so this should print 7
    int sum = 0;
    for(i=0; i<n; i++)
        sum += h_c[i];
    printf("final result: %d\n", sum/n);

    // Release device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
462eda920bbcf79e4e33ab538ee747198192a7ab.cu
// Copyright 2009-2020 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2020, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>

// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *b, int *c, int n)
{
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = a[id] + b[id];
}

int main( int argc, char* argv[] )
{
    // Size of vectors
    int n = 131072;

    // Host input vectors
    int *h_a;
    int *h_b;
    printf("init point h_a %p\n",h_a);
    // Host output vector
    int *h_c;

    // Device input vectors
    int *d_a;
    printf("init point d_a %p\n",d_a);
    int *d_b;
    // Device output vector
    int *d_c;

    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(int);

    // Allocate memory for each vector on host
    h_a = (int*)malloc(bytes);
    printf("malloc point h_a %p\n",h_a);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);

    // Allocate memory for each vector on GPU
    cudaMalloc(&d_a, bytes);
    printf("cuda malloc point d_a %p\n",d_a);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    int i;
    // Initialize vectors on host
    for( i = 0; i < n; i++ ) {
        h_a[i] = 3;
        h_b[i] = 4;
    }

    // Copy host vectors to device
    printf("pre cpy point h_a %p\n",h_a);
    printf("pre cpy point d_a %p\n",d_a);
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);

    int blockSize, gridSize;

    // Number of threads in each thread block
    blockSize = 256;

    // Number of thread blocks in grid
    gridSize = (int)ceil((float)n/blockSize);

    // Execute the kernel
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);

    // Copy array back to host
    cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );

    // Sum up vector c and print the result divided by n; every element is 3 + 4,
    // so this should print 7
    int sum = 0;
    for(i=0; i<n; i++)
        sum += h_c[i];
    printf("final result: %d\n", sum/n);

    // Release device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
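The runtime calls in this file are unchecked, so a failed allocation or copy would surface only as a wrong final sum. Below is a minimal sketch of an error-checking wrapper in the same spirit as the CUDA_CALL_SAFE macro used elsewhere in this collection; CUDA_CHECK is a hypothetical name, not part of this source.

// Hedged sketch: wrap each runtime call, e.g. CUDA_CHECK(cudaMalloc(&d_a, bytes));
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err__ = (call);                                       \
        if (err__ != cudaSuccess) {                                       \
            fprintf(stderr, "%s:%d: CUDA error: %s\n",                    \
                    __FILE__, __LINE__, cudaGetErrorString(err__));       \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)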
9ebc5994eae22fa9ca748596684037d364c36e09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ica/fastica/kernels.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_sumAbs( float *d_sum, float *d_X ) { // Based on the "Optimizing Parallel Reduction in CUDA" paper by Mark Harris. extern __shared__ float s_data[]; unsigned int row = threadIdx.y; unsigned int col = 0; float sum = 0.0f; #pragma unroll for (col = 0; col < blockDim.y; col++) { sum += fabsf( d_X[ col * blockDim.y + row ] ); } s_data[row] = sum; __syncthreads(); if (blockDim.y >= 256) { if (row < 128) { s_data[row] += s_data[row + 128]; }} __syncthreads(); if (blockDim.y >= 128) { if (row < 64) { s_data[row] += s_data[row + 64]; }} __syncthreads(); if (blockDim.y >= 64) { if (row < 32) { s_data[row] += s_data[row + 32]; }} if (blockDim.y >= 32) { if (row < 16) { s_data[row] += s_data[row + 16]; }} if (blockDim.y >= 16) { if (row < 8) { s_data[row] += s_data[row + 8]; }} if (row < 8) { s_data[row] += s_data[row + 4]; s_data[row] += s_data[row + 2]; s_data[row] += s_data[row + 1]; } if (row == 0) { *d_sum = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_scaleMatrix( float *d_X, float *alpha ) { unsigned int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.y + threadIdx.x; d_X[idx] = d_X[idx] / alpha[0]; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_tanh( float *d_ws, int ld ) { int idx = blockIdx.x * ld + threadIdx.y; d_ws[idx] = tanh( d_ws[idx] ); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_tanhDer(float *d_ws, unsigned int ld, unsigned int n_cols) { // Based on the "Optimizing Parallel Reduction in CUDA" paper by Mark Harris. float __shared__ s_data[256]; const unsigned int tid = threadIdx.x; const unsigned int row = blockIdx.y; unsigned int i = tid; float accum; s_data[tid] = 0; // The big difference between this code and Mark Harris' code, is the change // from a simple summation, to the summing of (1 - x^2) for every element // 'x' in a row. 
while (i < n_cols) { accum = d_ws[ i * ld + row ]; accum = 1.0f - accum * accum; s_data[tid] += accum; i += 256; } __syncthreads(); if (tid < 128) { s_data[tid] += s_data[tid + 128]; } __syncthreads(); if (tid < 64) { s_data[tid] += s_data[tid + 64]; } __syncthreads(); if (tid < 32) { s_data[tid] += s_data[tid + 32]; s_data[tid] += s_data[tid + 16]; s_data[tid] += s_data[tid + 8]; s_data[tid] += s_data[tid + 4]; s_data[tid] += s_data[tid + 2]; s_data[tid] += s_data[tid + 1]; } if (tid == 0) { d_ws[ row ] = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_cubeRule( float *d_wsum, float *d_ws, unsigned int ld, unsigned int n_cols ) { float __shared__ s_data[256]; const unsigned int tid = threadIdx.x; const unsigned int row = blockIdx.y; unsigned int i = tid; float val; s_data[tid] = 0.0f; while (i < n_cols) { val = d_ws[ i * ld + row ]; s_data[tid] += 3.0f * val * val; d_ws[ i * ld + row ] = val * val * val; i += 256; } __syncthreads(); if (tid < 128) { s_data[tid] += s_data[tid + 128]; } __syncthreads(); if (tid < 64) { s_data[tid] += s_data[tid + 64]; } __syncthreads(); if (tid < 32) { s_data[tid] += s_data[tid + 32]; s_data[tid] += s_data[tid + 16]; s_data[tid] += s_data[tid + 8]; s_data[tid] += s_data[tid + 4]; s_data[tid] += s_data[tid + 2]; s_data[tid] += s_data[tid + 1]; } if (tid == 0) { d_wsum[row] = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_gaussRule( float *d_wsum, float *d_ws, unsigned int ld, unsigned int n_cols ) { float __shared__ s_data[256]; const unsigned int tid = threadIdx.x; const unsigned int row = blockIdx.y; unsigned int i = tid; unsigned int ws_idx; float wz, sqr, expo; s_data[tid] = 0.0f; while (i < n_cols) { ws_idx = i * ld + row; wz = d_ws[ ws_idx ]; sqr = wz * wz; expo = exp( -sqr / 2.0f ); s_data[tid] += (1.0f - sqr) * expo; d_ws[ ws_idx ] = wz * expo; i += 256; } __syncthreads(); if (tid < 128) { s_data[tid] += s_data[tid + 128]; } __syncthreads(); if (tid < 64) { s_data[tid] += s_data[tid + 64]; } __syncthreads(); if (tid < 32) { s_data[tid] += s_data[tid + 32]; s_data[tid] += s_data[tid + 16]; s_data[tid] += s_data[tid + 8]; s_data[tid] += s_data[tid + 4]; s_data[tid] += s_data[tid + 2]; s_data[tid] += s_data[tid + 1]; } if (tid == 0) { d_wsum[row] = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_wnext( float *d_w, float *d_wx, float *d_sums, unsigned int ld, unsigned int n_cols ) { unsigned int col = blockDim.x * blockIdx.x + threadIdx.x; unsigned int row = blockDim.y * blockIdx.y + threadIdx.y; unsigned int idx = col * ld + row; d_w[idx] = (d_wx[idx] - d_sums[row] * d_w[idx]) / (float) n_cols; }
9ebc5994eae22fa9ca748596684037d364c36e09.cu
#include "ica/fastica/kernels.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_sumAbs( float *d_sum, float *d_X ) { // Based on the "Optimizing Parallel Reduction in CUDA" paper by Mark Harris. extern __shared__ float s_data[]; unsigned int row = threadIdx.y; unsigned int col = 0; float sum = 0.0f; #pragma unroll for (col = 0; col < blockDim.y; col++) { sum += fabsf( d_X[ col * blockDim.y + row ] ); } s_data[row] = sum; __syncthreads(); if (blockDim.y >= 256) { if (row < 128) { s_data[row] += s_data[row + 128]; }} __syncthreads(); if (blockDim.y >= 128) { if (row < 64) { s_data[row] += s_data[row + 64]; }} __syncthreads(); if (blockDim.y >= 64) { if (row < 32) { s_data[row] += s_data[row + 32]; }} if (blockDim.y >= 32) { if (row < 16) { s_data[row] += s_data[row + 16]; }} if (blockDim.y >= 16) { if (row < 8) { s_data[row] += s_data[row + 8]; }} if (row < 8) { s_data[row] += s_data[row + 4]; s_data[row] += s_data[row + 2]; s_data[row] += s_data[row + 1]; } if (row == 0) { *d_sum = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_scaleMatrix( float *d_X, float *alpha ) { unsigned int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.y + threadIdx.x; d_X[idx] = d_X[idx] / alpha[0]; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_tanh( float *d_ws, int ld ) { int idx = blockIdx.x * ld + threadIdx.y; d_ws[idx] = tanh( d_ws[idx] ); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_tanhDer(float *d_ws, unsigned int ld, unsigned int n_cols) { // Based on the "Optimizing Parallel Reduction in CUDA" paper by Mark Harris. float __shared__ s_data[256]; const unsigned int tid = threadIdx.x; const unsigned int row = blockIdx.y; unsigned int i = tid; float accum; s_data[tid] = 0; // The big difference between this code and Mark Harris' code, is the change // from a simple summation, to the summing of (1 - x^2) for every element // 'x' in a row. 
while (i < n_cols) { accum = d_ws[ i * ld + row ]; accum = 1.0f - accum * accum; s_data[tid] += accum; i += 256; } __syncthreads(); if (tid < 128) { s_data[tid] += s_data[tid + 128]; } __syncthreads(); if (tid < 64) { s_data[tid] += s_data[tid + 64]; } __syncthreads(); if (tid < 32) { s_data[tid] += s_data[tid + 32]; s_data[tid] += s_data[tid + 16]; s_data[tid] += s_data[tid + 8]; s_data[tid] += s_data[tid + 4]; s_data[tid] += s_data[tid + 2]; s_data[tid] += s_data[tid + 1]; } if (tid == 0) { d_ws[ row ] = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_cubeRule( float *d_wsum, float *d_ws, unsigned int ld, unsigned int n_cols ) { float __shared__ s_data[256]; const unsigned int tid = threadIdx.x; const unsigned int row = blockIdx.y; unsigned int i = tid; float val; s_data[tid] = 0.0f; while (i < n_cols) { val = d_ws[ i * ld + row ]; s_data[tid] += 3.0f * val * val; d_ws[ i * ld + row ] = val * val * val; i += 256; } __syncthreads(); if (tid < 128) { s_data[tid] += s_data[tid + 128]; } __syncthreads(); if (tid < 64) { s_data[tid] += s_data[tid + 64]; } __syncthreads(); if (tid < 32) { s_data[tid] += s_data[tid + 32]; s_data[tid] += s_data[tid + 16]; s_data[tid] += s_data[tid + 8]; s_data[tid] += s_data[tid + 4]; s_data[tid] += s_data[tid + 2]; s_data[tid] += s_data[tid + 1]; } if (tid == 0) { d_wsum[row] = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_gaussRule( float *d_wsum, float *d_ws, unsigned int ld, unsigned int n_cols ) { float __shared__ s_data[256]; const unsigned int tid = threadIdx.x; const unsigned int row = blockIdx.y; unsigned int i = tid; unsigned int ws_idx; float wz, sqr, expo; s_data[tid] = 0.0f; while (i < n_cols) { ws_idx = i * ld + row; wz = d_ws[ ws_idx ]; sqr = wz * wz; expo = exp( -sqr / 2.0f ); s_data[tid] += (1.0f - sqr) * expo; d_ws[ ws_idx ] = wz * expo; i += 256; } __syncthreads(); if (tid < 128) { s_data[tid] += s_data[tid + 128]; } __syncthreads(); if (tid < 64) { s_data[tid] += s_data[tid + 64]; } __syncthreads(); if (tid < 32) { s_data[tid] += s_data[tid + 32]; s_data[tid] += s_data[tid + 16]; s_data[tid] += s_data[tid + 8]; s_data[tid] += s_data[tid + 4]; s_data[tid] += s_data[tid + 2]; s_data[tid] += s_data[tid + 1]; } if (tid == 0) { d_wsum[row] = s_data[0]; } } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void __global__ fica_wnext( float *d_w, float *d_wx, float *d_sums, unsigned int ld, unsigned int n_cols ) { unsigned int col = blockDim.x * blockIdx.x + threadIdx.x; unsigned int row = blockDim.y * blockIdx.y + threadIdx.y; unsigned int idx = col * ld + row; d_w[idx] = (d_wx[idx] - d_sums[row] * d_w[idx]) / (float) n_cols; }
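fica_tanhDer reduces, for each matrix row, the sum of 1 - x^2 over all columns and writes the result into column 0 of d_ws. A serial reference makes that easy to validate; hostTanhDerRowSums below is a hypothetical helper, shown as a sketch, that assumes the same column-major layout (element (row, col) stored at col*ld + row) used by the kernels above.

// Hedged sketch: CPU reference for fica_tanhDer's per-row reduction.
// ws is column-major with leading dimension ld; result gets one entry per row.
void hostTanhDerRowSums(const float *ws, unsigned int ld, unsigned int n_rows,
                        unsigned int n_cols, float *result)
{
    for (unsigned int row = 0; row < n_rows; ++row) {
        float sum = 0.0f;
        for (unsigned int col = 0; col < n_cols; ++col) {
            float x = ws[col * ld + row];
            sum += 1.0f - x * x;   // same per-element transform as the kernel
        }
        result[row] = sum;
    }
}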
2485a3054ae1bb64b98533f3da412e6253df7896.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <string.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define HALO 1 // halo width along one direction when advancing to the next iteration #define CUDA_CALL_SAFE(f) \ do \ { \ hipError_t _cuda_error = f; \ if (_cuda_error != hipSuccess) \ { \ fprintf(stderr, \ "%s, %d, CUDA ERROR: %s %s\n", \ __FILE__, \ __LINE__, \ hipGetErrorName(_cuda_error), \ hipGetErrorString(_cuda_error) \ ); \ abort(); \ exit(EXIT_FAILURE); \ } \ } while (0) static inline double time_diff(struct timeval tv_start, struct timeval tv_end) { return (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0 + (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0; } void run(int argc, char** argv); long rows, cols; int *data; int *result; long pyramid_height = 1; char *folder; char *filepath; FILE *fp; struct timeval tv_start, tv_end; double kernel_time = 0; // in ms double writefile_time = 0; // in ms double readfile_time = 0; // in ms void init(int argc, char** argv) { if (argc == 3) { cols = atol(argv[1]); rows = cols; folder = argv[2]; } else { printf("Usage: %s <rows/cols> <folder>\n", argv[0]); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(hipMallocManaged(&data, sizeof(int) * rows * cols)); filepath = (char *)malloc(sizeof(char) * (strlen(folder) + 128)); if (!filepath) { fprintf(stderr, "Cannot allocate filepath"); exit(EXIT_FAILURE); } gettimeofday(&tv_start, NULL); sprintf(filepath, "%s/data.mem", folder); if ((fp = fopen(filepath, "rb")) == 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (fread(data, sizeof(int) * rows * cols, 1, fp) != 1) { fprintf(stderr, "Cannot read from %s\n", filepath); exit(EXIT_FAILURE); } fclose(fp); gettimeofday(&tv_end, NULL); readfile_time += time_diff(tv_start, tv_end); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void dynproc_kernel( long iteration, int *gpuWall, int *gpuSrc, int *gpuResults, long cols, long rows, long startStep, long border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; long bx = (long)blockIdx.x; long tx = (long)threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size long small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block long blkX = small_block_cols*bx-border; long blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination long xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. long validXmin = (blkX < 0) ? -blkX : 0; long validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; long W = tx-1; long E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 bool computed; for (long i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; long left = prev[W]; long up = prev[tx]; long right = prev[E]; long shortest = MIN(left, up); shortest = MIN(shortest, right); long index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], long rows, long cols, \ long pyramid_height, long blockCols, long borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (long t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); CUDA_CALL_SAFE(hipDeviceSynchronize()); } return dst; } int main(int argc, char** argv) { run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ long borderCols = (pyramid_height)*HALO; long smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; long blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; CUDA_CALL_SAFE(hipMallocManaged((void**)&gpuResult[0], sizeof(int)*cols)); CUDA_CALL_SAFE(hipMallocManaged((void**)&gpuResult[1], sizeof(int)*cols)); gpuWall = data + cols; memcpy(gpuResult[0], data, sizeof(int) * cols); gettimeofday(&tv_start, NULL); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols); gettimeofday(&tv_end, NULL); kernel_time += time_diff(tv_start, tv_end); result = gpuResult[final_ret]; gettimeofday(&tv_start, NULL); sprintf(filepath, "%s/result.uvm.mem", folder); if ((fp = fopen(filepath, "wb")) == 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (fwrite(result, sizeof(int) * cols, 1, fp) != 1) { fprintf(stderr, "Cannot write to %s\n", filepath); exit(EXIT_FAILURE); } fflush(fp); fsync(fileno(fp)); fclose(fp); gettimeofday(&tv_end, NULL); writefile_time += time_diff(tv_start, tv_end); hipFree(gpuResult[0]); hipFree(gpuResult[1]); hipFree(data); free(filepath); printf("==> header: kernel_time (ms),writefile_time (ms),readfile_time (ms)\n"); printf("==> data: %f,%f,%f\n", kernel_time, writefile_time, readfile_time); }
2485a3054ae1bb64b98533f3da412e6253df7896.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include <string.h> #include <unistd.h> #include <sys/time.h> #include <cuda.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define HALO 1 // halo width along one direction when advancing to the next iteration #define CUDA_CALL_SAFE(f) \ do \ { \ cudaError_t _cuda_error = f; \ if (_cuda_error != cudaSuccess) \ { \ fprintf(stderr, \ "%s, %d, CUDA ERROR: %s %s\n", \ __FILE__, \ __LINE__, \ cudaGetErrorName(_cuda_error), \ cudaGetErrorString(_cuda_error) \ ); \ abort(); \ exit(EXIT_FAILURE); \ } \ } while (0) static inline double time_diff(struct timeval tv_start, struct timeval tv_end) { return (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0 + (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0; } void run(int argc, char** argv); long rows, cols; int *data; int *result; long pyramid_height = 1; char *folder; char *filepath; FILE *fp; struct timeval tv_start, tv_end; double kernel_time = 0; // in ms double writefile_time = 0; // in ms double readfile_time = 0; // in ms void init(int argc, char** argv) { if (argc == 3) { cols = atol(argv[1]); rows = cols; folder = argv[2]; } else { printf("Usage: %s <rows/cols> <folder>\n", argv[0]); exit(EXIT_FAILURE); } CUDA_CALL_SAFE(cudaMallocManaged(&data, sizeof(int) * rows * cols)); filepath = (char *)malloc(sizeof(char) * (strlen(folder) + 128)); if (!filepath) { fprintf(stderr, "Cannot allocate filepath"); exit(EXIT_FAILURE); } gettimeofday(&tv_start, NULL); sprintf(filepath, "%s/data.mem", folder); if ((fp = fopen(filepath, "rb")) == 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (fread(data, sizeof(int) * rows * cols, 1, fp) != 1) { fprintf(stderr, "Cannot read from %s\n", filepath); exit(EXIT_FAILURE); } fclose(fp); gettimeofday(&tv_end, NULL); readfile_time += time_diff(tv_start, tv_end); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void dynproc_kernel( long iteration, int *gpuWall, int *gpuSrc, int *gpuResults, long cols, long rows, long startStep, long border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; long bx = (long)blockIdx.x; long tx = (long)threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size long small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block long blkX = small_block_cols*bx-border; long blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination long xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. long validXmin = (blkX < 0) ? -blkX : 0; long validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; long W = tx-1; long E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 bool computed; for (long i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; long left = prev[W]; long up = prev[tx]; long right = prev[E]; long shortest = MIN(left, up); shortest = MIN(shortest, right); long index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], long rows, long cols, \ long pyramid_height, long blockCols, long borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (long t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; dynproc_kernel<<<dimGrid, dimBlock>>>( MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); CUDA_CALL_SAFE(cudaThreadSynchronize()); } return dst; } int main(int argc, char** argv) { run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ long borderCols = (pyramid_height)*HALO; long smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; long blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; CUDA_CALL_SAFE(cudaMallocManaged((void**)&gpuResult[0], sizeof(int)*cols)); CUDA_CALL_SAFE(cudaMallocManaged((void**)&gpuResult[1], sizeof(int)*cols)); gpuWall = data + cols; memcpy(gpuResult[0], data, sizeof(int) * cols); gettimeofday(&tv_start, NULL); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols); gettimeofday(&tv_end, NULL); kernel_time += time_diff(tv_start, tv_end); result = gpuResult[final_ret]; gettimeofday(&tv_start, NULL); sprintf(filepath, "%s/result.uvm.mem", folder); if ((fp = fopen(filepath, "wb")) == 0) { fprintf(stderr, "%s was not opened\n", filepath); exit(EXIT_FAILURE); } if (fwrite(result, sizeof(int) * cols, 1, fp) != 1) { fprintf(stderr, "Cannot write to %s\n", filepath); exit(EXIT_FAILURE); } fflush(fp); fsync(fileno(fp)); fclose(fp); gettimeofday(&tv_end, NULL); writefile_time += time_diff(tv_start, tv_end); cudaFree(gpuResult[0]); cudaFree(gpuResult[1]); cudaFree(data); free(filepath); printf("==> header: kernel_time (ms),writefile_time (ms),readfile_time (ms)\n"); printf("==> data: %f,%f,%f\n", kernel_time, writefile_time, readfile_time); }
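Each outer iteration of calc_path advances the pathfinder recurrence by one row: a cell adds its wall cost to the minimum of the three neighbouring cells in the previous row, with clamping at the edges. Below is a serial sketch of that recurrence for spot-checking result.uvm.mem; hostPathfinder is a hypothetical helper and assumes the row-major layout and pyramid_height = 1 used above.

// Hedged sketch: serial reference for the dynamic-programming recurrence.
// data holds rows*cols ints; the first row seeds the costs, later rows are walls.
#include <stdlib.h>
#include <string.h>

static int imin3(int a, int b, int c) { return a < b ? (a < c ? a : c) : (b < c ? b : c); }

void hostPathfinder(const int *data, long rows, long cols, int *result)
{
    int *prev = (int *)malloc(cols * sizeof(int));
    memcpy(prev, data, cols * sizeof(int));
    for (long t = 1; t < rows; ++t) {
        const int *wall = data + t * cols;
        for (long c = 0; c < cols; ++c) {
            int left  = prev[c > 0 ? c - 1 : c];        // clamp at left edge
            int up    = prev[c];
            int right = prev[c < cols - 1 ? c + 1 : c]; // clamp at right edge
            result[c] = wall[c] + imin3(left, up, right);
        }
        memcpy(prev, result, cols * sizeof(int));
    }
    free(prev);
}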
8f74d8554645191df9ca138221118cb2830a03c5.hip
// !!! This is a file automatically generated by hipify!!! //=================================================================// // CUDA BFS kernel // Topological-Driven: one node per thread, enable kernel unrolling // use atomicMin for distance updates // Reference: // lonestar-GPU bfs_atomic algorithm //=================================================================// #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" #define WORKPERTHREAD 1 #define VERTICALWORKPERTHREAD 12 // unroll level #define BLKSIZE 1024 #define BANKSIZE BLKSIZE __global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_property[tid] = MY_INFINITY; } } __global__ void kernel(uint32_t *vplist, cudaGraph graph, unsigned unroll, bool *changed) { unsigned nn = WORKPERTHREAD * (blockIdx.x * blockDim.x + threadIdx.x); unsigned int ii; __shared__ unsigned changedv[VERTICALWORKPERTHREAD * BLKSIZE]; unsigned iichangedv = threadIdx.x; unsigned anotheriichangedv = iichangedv; unsigned int nprocessed = 0; // collect the work to be performed. for (unsigned node = 0; node < WORKPERTHREAD; ++node, ++nn) { changedv[iichangedv] = nn; iichangedv += BANKSIZE; } // go over the worklist and keep updating it in a BFS manner. while (anotheriichangedv < iichangedv) { nn = changedv[anotheriichangedv]; anotheriichangedv += BANKSIZE; if (nn < graph.vertex_cnt) { unsigned src = nn; // source node. uint64_t start = graph.get_firstedge_index(src); uint64_t end = graph.get_edge_index_end(src); // go over all the target nodes for the source node. for (ii = start; ii < end; ++ii) { unsigned int u = src; unsigned int v = graph.get_edge_dest(ii); // target node. unsigned wt = 1; uint32_t alt = vplist[u] + wt; if (alt < vplist[v]) { atomicMin(&(vplist[v]), alt); if (++nprocessed < unroll) { // add work to the worklist. 
changedv[iichangedv] = v; iichangedv += BANKSIZE; } } } } } if (nprocessed) *changed = true; } void cuda_BFS(uint64_t * vertexlist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt, uint64_t root) { uint32_t * device_vpl = 0; bool * device_over = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time unsigned ProcCnt = 1; unsigned factor = 128; unsigned unroll = VERTICALWORKPERTHREAD; // unroll parameter <=== can be changed // set cuda to be sharedmem friendly hipFuncSetCacheConfig(kernel, hipFuncCachePreferShared); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); ProcCnt = deviceProp.multiProcessorCount; factor = (vertex_cnt + BLKSIZE * ProcCnt - 1) / (BLKSIZE * ProcCnt); unsigned int num_block = ProcCnt * factor; // malloc of gpu side cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) ); hipEvent_t start_event, stop_event; cudaErrCheck( hipEventCreate(&start_event) ); cudaErrCheck( hipEventCreate(&stop_event) ); // initialization hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(BLKSIZE), 0, 0, device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); uint32_t zeronum=0; // memcpy from host to device hipEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t), hipMemcpyHostToDevice) ); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&h2d_copy_time, start_event, stop_event); // BFS traversal bool stop; hipEventRecord(start_event, 0); int k=0; do { // Each iteration processes // one level of BFS traversal stop = false; cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( kernel), dim3(num_block/WORKPERTHREAD), dim3(BLKSIZE), 0, 0, device_vpl, d_graph, unroll, device_over); cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) ); k++; }while(stop); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&kernel_time, start_event, stop_event); hipEventRecord(start_event, 0); cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), hipMemcpyDeviceToHost) ); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&d2h_copy_time, start_event, stop_event); printf("== iteration #: %d\n", k); #ifndef ENABLE_VERIFY printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); #endif hipEventDestroy(start_event); hipEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( hipFree(device_vpl) ); }
8f74d8554645191df9ca138221118cb2830a03c5.cu
//=================================================================// // CUDA BFS kernel // Topological-Driven: one node per thread, enable kernel unrolling // use atomicMin for distance updates // Reference: // lonestar-GPU bfs_atomic algorithm //=================================================================// #include <cuda.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" #define WORKPERTHREAD 1 #define VERTICALWORKPERTHREAD 12 // unroll level #define BLKSIZE 1024 #define BANKSIZE BLKSIZE __global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_property[tid] = MY_INFINITY; } } __global__ void kernel(uint32_t *vplist, cudaGraph graph, unsigned unroll, bool *changed) { unsigned nn = WORKPERTHREAD * (blockIdx.x * blockDim.x + threadIdx.x); unsigned int ii; __shared__ unsigned changedv[VERTICALWORKPERTHREAD * BLKSIZE]; unsigned iichangedv = threadIdx.x; unsigned anotheriichangedv = iichangedv; unsigned int nprocessed = 0; // collect the work to be performed. for (unsigned node = 0; node < WORKPERTHREAD; ++node, ++nn) { changedv[iichangedv] = nn; iichangedv += BANKSIZE; } // go over the worklist and keep updating it in a BFS manner. while (anotheriichangedv < iichangedv) { nn = changedv[anotheriichangedv]; anotheriichangedv += BANKSIZE; if (nn < graph.vertex_cnt) { unsigned src = nn; // source node. uint64_t start = graph.get_firstedge_index(src); uint64_t end = graph.get_edge_index_end(src); // go over all the target nodes for the source node. for (ii = start; ii < end; ++ii) { unsigned int u = src; unsigned int v = graph.get_edge_dest(ii); // target node. unsigned wt = 1; uint32_t alt = vplist[u] + wt; if (alt < vplist[v]) { atomicMin(&(vplist[v]), alt); if (++nprocessed < unroll) { // add work to the worklist. 
changedv[iichangedv] = v; iichangedv += BANKSIZE; } } } } } if (nprocessed) *changed = true; } void cuda_BFS(uint64_t * vertexlist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt, uint64_t root) { uint32_t * device_vpl = 0; bool * device_over = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time unsigned ProcCnt = 1; unsigned factor = 128; unsigned unroll = VERTICALWORKPERTHREAD; // unroll parameter <=== can be changed // set cuda to be sharedmem friendly cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferShared); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); ProcCnt = deviceProp.multiProcessorCount; factor = (vertex_cnt + BLKSIZE * ProcCnt - 1) / (BLKSIZE * ProcCnt); unsigned int num_block = ProcCnt * factor; // malloc of gpu side cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) ); cudaEvent_t start_event, stop_event; cudaErrCheck( cudaEventCreate(&start_event) ); cudaErrCheck( cudaEventCreate(&stop_event) ); // initialization initialize<<<num_block, BLKSIZE>>>(device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); uint32_t zeronum=0; // memcpy from host to device cudaEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t), cudaMemcpyHostToDevice) ); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event); // BFS traversal bool stop; cudaEventRecord(start_event, 0); int k=0; do { // Each iteration processes // one level of BFS traversal stop = false; cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ); kernel<<<num_block/WORKPERTHREAD, BLKSIZE>>>(device_vpl, d_graph, unroll, device_over); cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) ); k++; }while(stop); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&kernel_time, start_event, stop_event); cudaEventRecord(start_event, 0); cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event); printf("== iteration #: %d\n", k); #ifndef ENABLE_VERIFY printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); #endif cudaEventDestroy(start_event); cudaEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( cudaFree(device_vpl) ); }
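Because the kernel relaxes unit-weight edges with atomicMin until no distance changes, the converged vplist should match plain BFS levels from the root. Below is a serial BFS sketch for verification; hostBFS is a hypothetical helper and assumes a CSR-style layout in which vertexlist holds vertex_cnt + 1 offsets into edgelist, which is how cudaGraph::read appears to consume the arrays.

// Hedged sketch: serial BFS levels for verification against vproplist.
#include <stdint.h>
#include <vector>
#include <queue>

std::vector<uint32_t> hostBFS(const uint64_t *vertexlist, const uint64_t *edgelist,
                              uint64_t vertex_cnt, uint64_t root)
{
    const uint32_t INF = 0xFFFFFFFFu;          // stands in for MY_INFINITY
    std::vector<uint32_t> dist(vertex_cnt, INF);
    std::queue<uint64_t> q;
    dist[root] = 0;
    q.push(root);
    while (!q.empty()) {
        uint64_t u = q.front(); q.pop();
        for (uint64_t e = vertexlist[u]; e < vertexlist[u + 1]; ++e) {
            uint64_t v = edgelist[e];
            if (dist[v] == INF) {              // first visit fixes the BFS level
                dist[v] = dist[u] + 1;
                q.push(v);
            }
        }
    }
    return dist;
}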
7b1ca847c9cd3e742187226360a539816d06489f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "peakFinder.h" #include <stdio.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <functional> float *d_data = NULL; uint *d_conmap = NULL; uint *d_centers = NULL; uint *d_dense_centers = NULL; Peak *d_peaks = NULL; const int FILTER_PATCH_WIDTH = 32; const int FILTER_PATCH_HEIGHT = 4; const int FILTER_THREADS_PER_PATCH = FILTER_PATCH_WIDTH * FILTER_PATCH_HEIGHT; const int FILTER_PATCH_ON_WIDTH = (WIDTH) / FILTER_PATCH_WIDTH; const int FILTER_PATCH_ON_HEIGHT = (HEIGHT + FILTER_PATCH_HEIGHT - 1) / FILTER_PATCH_HEIGHT; const int FILTER_PATCH_PER_IMAGE = FILTER_PATCH_ON_WIDTH * FILTER_PATCH_ON_HEIGHT; __global__ void filterByThrHigh_v2(const float *d_data, uint *d_centers) { uint imgId = blockIdx.x / FILTER_PATCH_PER_IMAGE; uint patch_id = blockIdx.x % FILTER_PATCH_PER_IMAGE; uint patch_x = patch_id % FILTER_PATCH_ON_WIDTH; uint patch_y = patch_id / FILTER_PATCH_ON_WIDTH; __shared__ float data[FILTER_PATCH_HEIGHT * FILTER_PATCH_WIDTH]; __shared__ uint idxs[FILTER_PATCH_HEIGHT * FILTER_PATCH_WIDTH]; int irow = threadIdx.x / FILTER_PATCH_WIDTH; int icol = threadIdx.x % FILTER_PATCH_WIDTH; int row = patch_y * FILTER_PATCH_HEIGHT + irow; int col = patch_x * FILTER_PATCH_WIDTH + icol; const int NUM_NMS_AREA = FILTER_PATCH_WIDTH / FILTER_PATCH_HEIGHT; int local_area = icol / FILTER_PATCH_HEIGHT; int local_pos = local_area * (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT) + irow * FILTER_PATCH_HEIGHT + icol % FILTER_PATCH_HEIGHT; uint device_pos = imgId * (WIDTH * HEIGHT) + row * WIDTH + col; __shared__ bool has_candidate[NUM_NMS_AREA]; if (threadIdx.x < NUM_NMS_AREA) has_candidate[threadIdx.x] = false; __syncthreads(); // load data if (row < WIDTH && col < HEIGHT){ data[local_pos] = d_data[device_pos]; idxs[local_pos] = device_pos; } else{ data[local_pos] = 0; } if (data[local_pos] > thr_high) has_candidate[local_area] = true; __syncthreads(); // find maximum local_area = threadIdx.x / (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT); if (!has_candidate[local_area]) return; const int local_tid = threadIdx.x % (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT); const int local_offset = local_area * (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT); int num_of_working_threads = (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT) / 2; // if (local_tid >= num_of_working_threads) return; int idx_mul = 1; while (num_of_working_threads > 1 && local_tid < num_of_working_threads) { int idx1 = (local_tid * 2) * idx_mul + local_offset; int idx2 = idx1 + idx_mul; int idxm = data[idx1] > data[idx2] ? 
idx1 : idx2; data[idx1] = data[idxm]; idxs[idx1] = idxs[idxm]; __syncthreads(); idx_mul *= 2; num_of_working_threads /= 2; } if (local_tid == 0) { uint write_pos = blockIdx.x * NUM_NMS_AREA + local_area; d_centers[write_pos] = idxs[local_offset]; } } const int PATCH_WIDTH = (2 * HALF_WIDTH + 1); const int FF_LOAD_THREADS_PER_CENTER = 64; const int FF_THREADS_PER_CENTER = 32; const int FF_INFO_THREADS_PER_CENTER = FF_THREADS_PER_CENTER; // const int FF_SIDE_WIDTH = FF_THREADS_PER_CENTER / 4; // const int FF_SIDE_OFFSET = 1 - FF_SIDE_WIDTH / 2; const int FF_THREADS_PER_BLOCK = 64; const int FF_LOAD_PASS = (2 * HALF_WIDTH + 1) * (2 * HALF_WIDTH + 1) / FF_LOAD_THREADS_PER_CENTER + 1; const int FF_CENTERS_PER_BLOCK = FF_THREADS_PER_BLOCK / FF_LOAD_THREADS_PER_CENTER; __device__ void calPreSum(int *preSum) { for(int i = 1; i < FF_INFO_THREADS_PER_CENTER; i*=2){ int prevIdx = threadIdx.x - i; int sum = preSum[threadIdx.x]; if (prevIdx > 0){ sum += preSum[prevIdx]; } __syncthreads(); preSum[threadIdx.x] = sum; __syncthreads(); } } typedef float (*reducer) (const float &, const float &); typedef float (*transformer) (const int &); __device__ float deviceAdd(const float &a, const float &b) {return a+b;} __device__ float deviceMin(const float &a, const float &b) {return a<b?a:b;} __device__ float deviceMax(const float &a, const float &b) {return a>b?a:b;} const int WARP_SIZE = 32; __device__ __inline__ float warpReduce(float val, int npix, reducer r) { int offset = 32; if (npix < 32) { if (npix > 16) offset = 16; else if (npix > 8) offset = 8; else if (npix > 4) offset = 4; else if (npix > 2) offset = 2; else if (npix > 1) offset = 1; else offset = 0; } for(; offset > 0; offset /= 2){ int srcIdx = threadIdx.x + offset; float nVal = __shfl_down(val, offset); if (srcIdx < npix){ val = r(val, nVal); } } return val; } __device__ __inline__ float blockReduce(int npix, transformer t, reducer r) { const int FF_PIX_PASS = (npix + FF_INFO_THREADS_PER_CENTER - 1) / FF_INFO_THREADS_PER_CENTER; __shared__ float buffer[32]; for(int i = 0; i < FF_PIX_PASS; i++){ uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; int n = WARP_SIZE; if (i == FF_PIX_PASS - 1){ n = npix % WARP_SIZE; } float val = warpReduce(t(tmp_id), n, r); if (threadIdx.x == 0){ buffer[i] = val; } } return warpReduce(buffer[threadIdx.x], FF_PIX_PASS, r); } __device__ __inline__ bool inRing(int dr, int dc) { float dist2 = dr * dr + dc * dc; const float lower = r0 * r0; const float upper = (r0 + dr) * (r0 + dr); return dist2 >= lower && dist2 <= upper; } __device__ __inline__ bool peakIsPreSelected(float son, float npix, float amp_max, float amp_tot) { if (son < peak_son_min) return false; if (npix < peak_npix_min) return false; if (npix > peak_npix_max) return false; if (amp_max < peak_amax_thr) return false; if (amp_tot < peak_atot_thr) return false; return true; } // one center per block __global__ void floodFill_v2(const float *d_data, const uint *d_centers, Peak *d_peaks, uint *d_conmap) { const uint center_id = d_centers[blockIdx.x]; const uint img_id = center_id / (WIDTH * HEIGHT); const uint crow = center_id / WIDTH % HEIGHT; const uint ccol = center_id % WIDTH; __shared__ float data[PATCH_WIDTH][PATCH_WIDTH]; __shared__ uint status[PATCH_WIDTH][PATCH_WIDTH]; // load data for (int i = 0; i < FF_LOAD_PASS; i++) { const uint tmp_id = i * FF_LOAD_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; const int drow = crow + irow - HALF_WIDTH; const int dcol = ccol + icol - 
HALF_WIDTH; if (drow >= 0 && drow < HEIGHT && dcol >= 0 && dcol < WIDTH) { data[irow][icol] = d_data[img_id * (WIDTH * HEIGHT) + drow * WIDTH + dcol]; } else if(irow < PATCH_WIDTH) { data[irow][icol] = 0; } } for(int i = 0; i < FF_LOAD_PASS; i++) { const uint tmp_id = i * FF_LOAD_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; if (irow < PATCH_WIDTH){ status[irow][icol] = 0; } if (irow == HALF_WIDTH && icol == HALF_WIDTH){ status[irow][icol] = center_id; } } __syncthreads(); if (threadIdx.x >= FF_THREADS_PER_CENTER) return; // flood fill const int FF_SCAN_LENGTH = FF_THREADS_PER_CENTER / 8; const int sign_x[8] = {-1, 1, 1, -1, 1, 1, -1, -1}; const int sign_y[8] = {1, 1, -1, -1, 1, -1, -1, 1}; const int scanline_id = threadIdx.x / FF_SCAN_LENGTH; const int id_in_grp = threadIdx.x % (2 * FF_SCAN_LENGTH); const int base_v = id_in_grp - FF_SCAN_LENGTH; int icol = base_v * sign_x[scanline_id] + HALF_WIDTH; int irow = base_v * sign_y[scanline_id] + HALF_WIDTH; const int scangrp_id = threadIdx.x / (2 * FF_SCAN_LENGTH); const int dxs[4] = {-1, 1, 0, 0}; const int dys[4] = {0, 0, 1, -1}; const int dx = dxs[scangrp_id]; const int dy = dys[scangrp_id]; const float center_intensity = data[HALF_WIDTH][HALF_WIDTH]; __shared__ bool is_local_maximum; is_local_maximum = true; for(int i = 1; i <= rank; i++){ __syncthreads(); if (!is_local_maximum) return; icol += dx; irow += dy; if (data[irow][icol] > center_intensity){ is_local_maximum = false; } if (data[irow][icol] > thr_low){ if (status[irow-dy][icol-dx] == center_id){ status[irow][icol] = center_id; } } } const int bound = base_v > 0 ? base_v : -base_v; for(int i = 1; i <= FF_SCAN_LENGTH - 1; i++){ __syncthreads(); if (!is_local_maximum) return; if (i > bound) continue; icol += dx; irow += dy; // if (irow >= PATCH_WIDTH || icol >= PATCH_WIDTH || irow < 0 || icol < 0){ // printf("irow:%d, icol:%d, img_id:%d, i:%d\n", irow, icol, img_id, i); // } if (data[irow][icol] > center_intensity){ is_local_maximum = false; } if (data[irow][icol] > thr_low){ if (status[irow-dy][icol-dx] == center_id){ status[irow][icol] = center_id; } } } const int FF_PROC_PASS = (PATCH_WIDTH * PATCH_WIDTH + FF_INFO_THREADS_PER_CENTER - 1) / FF_INFO_THREADS_PER_CENTER; // calculate peak info __shared__ float peak_data[PATCH_WIDTH * PATCH_WIDTH]; __shared__ int peak_row[PATCH_WIDTH * PATCH_WIDTH]; __shared__ int peak_col[PATCH_WIDTH * PATCH_WIDTH]; // data compaction __shared__ int preSum[FF_INFO_THREADS_PER_CENTER]; preSum[threadIdx.x] = 0; for(int i = 0; i < FF_PROC_PASS; i++){ const uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; if (irow < PATCH_WIDTH && status[irow][icol] == center_id){ preSum[threadIdx.x] += 1; } } calPreSum(preSum); int npix = preSum[FF_INFO_THREADS_PER_CENTER - 1]; int counter = 0; __shared__ float bg_avg; __shared__ float bg_rms; __shared__ float bg_npix; if (threadIdx.x == 0){ bg_avg = 0; bg_rms = 0; bg_npix = 0; } for(int i = 0; i < FF_PROC_PASS; i++){ const uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; if (irow < PATCH_WIDTH){ if (status[irow][icol] == center_id){ int pos = counter; if (threadIdx.x > 0) pos += preSum[threadIdx.x - 1]; peak_data[pos] = data[irow][icol]; peak_row[pos] = irow; peak_col[pos] = icol; counter ++; } // calculate background info if (inRing(irow-HALF_WIDTH,icol-HALF_WIDTH) && 
data[irow][icol] < thr_low){ float d = data[irow][icol]; atomicAdd(&bg_avg,d); atomicAdd(&bg_rms,d * d); atomicAdd(&bg_npix, 1); } } } const int FF_PIX_PASS = (npix + FF_INFO_THREADS_PER_CENTER - 1) / FF_INFO_THREADS_PER_CENTER; __shared__ float buffer[32]; #define BLOCK_REDUCE(v,t,r) \ for(int i = 0; i < FF_PIX_PASS; i++){ \ uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; \ int n = WARP_SIZE; \ if (i == FF_PIX_PASS - 1){ \ n = npix % WARP_SIZE; \ } \ float val = warpReduce(t(tmp_id), n, r); \ if (threadIdx.x == 0){ \ buffer[i] = val; \ } \ } \ v= warpReduce(buffer[threadIdx.x], FF_PIX_PASS, r); float samp; BLOCK_REDUCE(samp, [=]__device__(const int &id) -> float {return peak_data[id];}, deviceAdd); __shared__ Peak peak; if (threadIdx.x == 0) { bg_avg /= bg_npix; bg_rms = bg_rms / bg_npix - bg_avg * bg_avg; bg_rms = sqrtf(bg_rms); float noise_tot = bg_rms * sqrtf(npix); peak.amp_tot = samp - bg_avg * npix; peak.amp_max = center_intensity - bg_avg; peak.son = noise_tot > 0 ? peak.amp_tot / noise_tot : 0; peak.bkgd = bg_avg; peak.noise = bg_rms; peak.valid = peakIsPreSelected(peak.son, npix, peak.amp_max, peak.amp_tot); } __syncthreads(); if (!peak.valid) return; // if (center_id == 4734401 && threadIdx.x == 0) // printf("sum of intensity:%f,\n", samp); float rmin; BLOCK_REDUCE(rmin, [=]__device__(const int &id) -> float {return peak_row[id];}, deviceMin); float rmax; BLOCK_REDUCE(rmax, [=]__device__(const int &id) -> float {return peak_row[id];}, deviceMax); float cmin; BLOCK_REDUCE(cmin, [=]__device__(const int &id) -> float {return peak_col[id];}, deviceMin); float cmax; BLOCK_REDUCE(cmax, [=]__device__(const int &id) -> float {return peak_col[id];}, deviceMax); float sar1; BLOCK_REDUCE(sar1, [=]__device__(const int &id) -> float {return peak_data[id] * peak_row[id];}, deviceAdd); float sac1; BLOCK_REDUCE(sac1, [=]__device__(const int &id) -> float {return peak_data[id] * peak_col[id];}, deviceAdd); float sar2; BLOCK_REDUCE(sar2, [=]__device__(const int &id) -> float {return peak_data[id] * peak_row[id] * peak_row[id];}, deviceAdd); float sac2; BLOCK_REDUCE(sac2, [=]__device__(const int &id) -> float {return peak_data[id] * peak_col[id] * peak_col[id];}, deviceAdd); if(threadIdx.x == 0){ peak.evt = img_id / SHOTS; peak.seg = img_id % SHOTS; // printf("center_id: %d, evt: %f, seg:%f\n", center_id, peak.evt, peak.seg); peak.row = crow; peak.col = ccol; peak.npix = npix; peak.row_min = rmin; peak.row_max = rmax; peak.col_min = cmin; peak.col_max = cmax; if (samp > 0){ sar1 = sar1 / samp; sac1 = sac1 / samp; sar2 = sar2 / samp - sar1 * sar1; sac2 = sac2 / samp - sac1 * sac1; peak.row_cgrav = sar1; peak.col_cgrav = sac1; peak.row_sigma = (npix > 1 && sar2 > 0) ? sqrtf(sar2) : 0; peak.col_sigma = (npix > 1 && sac2 > 0) ? 
sqrtf(sac2) : 0; } else { peak.row_cgrav = crow; peak.col_cgrav = ccol; peak.row_sigma = 0; peak.col_sigma = 0; } d_peaks[blockIdx.x] = peak; } // printf("center_id:%d\n", center_id); // write data for(int i = 0; i < FF_PROC_PASS; i++){ const uint tmp_id = i * FF_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; const int drow = crow + irow - rank; const int dcol = ccol + icol - rank; if (irow < PATCH_WIDTH && status[irow][icol] == center_id && drow >= 0 && drow < HEIGHT && dcol >= 0 && dcol < WIDTH) { // if(img_id == 9) // printf("irow:%d, icol:%d, center_id:%d\n", irow, icol, center_id); d_conmap[img_id * (WIDTH * HEIGHT) + drow * WIDTH + dcol] = status[irow][icol]; } } } struct is_center { __device__ bool operator()(const uint &x){ // return x == addr_conmap[x]; return x > 0; } }; void checkCudaError(hipError_t err, const char* msg) { if (err != hipSuccess) { printf("failed: %s\n, error code: %s\n", msg, hipGetErrorString(err)); } } void getCudaError(const char *msg) { hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("failed: %s\n, error code %s\n", msg, hipGetErrorString(err)); } } void setUpData(float *h_data) { checkCudaError(hipMalloc((void **)&d_data, LSIZE * sizeof(float)), "hipMalloc d_data"); checkCudaError(hipMalloc((void **)&d_conmap, LSIZE * sizeof(uint)), "hipMalloc d_conmap"); checkCudaError(hipMemset(d_conmap, 0, sizeof(uint)*LSIZE), "hipMemset d_conmap"); checkCudaError(hipMemcpy(d_data, h_data, LSIZE * sizeof(float), hipMemcpyHostToDevice), "hipMemcpy h2d"); } void releaseData() { hipFree(d_data); hipFree(d_conmap); } extern "C" void processImages(float *data, Peak *&peak_out, int &npeaks, uint *data_out) { float miliseconds = 0.0f; hipEvent_t t0, t1; hipEventCreate(&t0); hipEventCreate(&t1); hipEventRecord(t0); setUpData(data); hipEventRecord(t1); hipEventSynchronize(t1); hipEventElapsedTime(&miliseconds, t0, t1); printf("passing data to gpu takes %f miliseconds\n", miliseconds); // floodFill v2 printf("filterByThrHigh_v2: num_blocks:%ld\n", FILTER_PATCH_PER_IMAGE * EVENTS * SHOTS); const int centers_size = FILTER_PATCH_PER_IMAGE * (FILTER_PATCH_WIDTH / FILTER_PATCH_HEIGHT) * EVENTS * SHOTS; checkCudaError(hipMalloc((void **)&d_centers, centers_size * sizeof(uint)), "hipMalloc d_centers"); checkCudaError(hipMemset(d_centers, 0, centers_size * sizeof(uint)), "hipMemset d_centers"); checkCudaError(hipMalloc((void **)&d_dense_centers, centers_size * sizeof(uint)), "hipMalloc d_dense_centers"); hipDeviceSynchronize(); hipEventRecord(t0); hipLaunchKernelGGL(( filterByThrHigh_v2), dim3(FILTER_PATCH_PER_IMAGE * EVENTS * SHOTS), dim3(FILTER_THREADS_PER_PATCH), 0, 0, d_data, d_centers); getCudaError("filterByThrHigh_v2"); hipEventRecord(t1); hipEventSynchronize(t1); hipEventElapsedTime(&miliseconds, t0, t1); printf("filterByThrHigh_v2 takes %f miliseconds\n", miliseconds); hipEventRecord(t0); thrust::device_ptr<uint> dp_dense_centers = thrust::device_pointer_cast(d_dense_centers); thrust::device_ptr<uint> dp_centers = thrust::device_pointer_cast(d_centers); auto end_centers = thrust::copy_if(dp_centers, dp_centers + centers_size, dp_dense_centers, is_center()); int num_pix = end_centers - dp_dense_centers; printf("num of testing pixels:%d\n", num_pix); hipEventRecord(t1); hipEventSynchronize(t1); hipEventElapsedTime(&miliseconds, t0, t1); printf("stream compaction takes %f miliseconds\n", miliseconds); const int NUM_BLOCKS = num_pix / FF_CENTERS_PER_BLOCK; npeaks = num_pix; hipEventRecord(t0); 
checkCudaError(hipMalloc((void **)&d_peaks, num_pix * sizeof(Peak)), "hipMalloc d_peaks"); checkCudaError(hipMemset(d_peaks, 0, num_pix * sizeof(Peak)), "hipMemset d_peaks"); hipLaunchKernelGGL(( floodFill_v2), dim3(NUM_BLOCKS), dim3(FF_THREADS_PER_BLOCK), 0, 0, d_data, d_dense_centers, d_peaks, d_conmap); hipEventRecord(t1); hipEventSynchronize(t1); hipEventElapsedTime(&miliseconds, t0, t1); printf("floodFill_v2 takes %f miliseconds\n", miliseconds); getCudaError("floodFill_v2"); peak_out = new Peak[num_pix]; hipEventRecord(t0); checkCudaError(hipMemcpy(peak_out, d_peaks, num_pix * sizeof(Peak), hipMemcpyDeviceToHost), "hipMemcpy d2h"); hipEventRecord(t1); hipEventSynchronize(t1); hipEventElapsedTime(&miliseconds, t0, t1); printf("copying peaks to cpu takes %f miliseconds\n", miliseconds); if (data_out != NULL) { checkCudaError(hipMemcpy(data_out, d_conmap, LSIZE * sizeof(uint), hipMemcpyDeviceToHost), "hipMemcpy d2h"); } releaseData(); }
7b1ca847c9cd3e742187226360a539816d06489f.cu
#include "peakFinder.h" #include <stdio.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <functional> float *d_data = NULL; uint *d_conmap = NULL; uint *d_centers = NULL; uint *d_dense_centers = NULL; Peak *d_peaks = NULL; const int FILTER_PATCH_WIDTH = 32; const int FILTER_PATCH_HEIGHT = 4; const int FILTER_THREADS_PER_PATCH = FILTER_PATCH_WIDTH * FILTER_PATCH_HEIGHT; const int FILTER_PATCH_ON_WIDTH = (WIDTH) / FILTER_PATCH_WIDTH; const int FILTER_PATCH_ON_HEIGHT = (HEIGHT + FILTER_PATCH_HEIGHT - 1) / FILTER_PATCH_HEIGHT; const int FILTER_PATCH_PER_IMAGE = FILTER_PATCH_ON_WIDTH * FILTER_PATCH_ON_HEIGHT; __global__ void filterByThrHigh_v2(const float *d_data, uint *d_centers) { uint imgId = blockIdx.x / FILTER_PATCH_PER_IMAGE; uint patch_id = blockIdx.x % FILTER_PATCH_PER_IMAGE; uint patch_x = patch_id % FILTER_PATCH_ON_WIDTH; uint patch_y = patch_id / FILTER_PATCH_ON_WIDTH; __shared__ float data[FILTER_PATCH_HEIGHT * FILTER_PATCH_WIDTH]; __shared__ uint idxs[FILTER_PATCH_HEIGHT * FILTER_PATCH_WIDTH]; int irow = threadIdx.x / FILTER_PATCH_WIDTH; int icol = threadIdx.x % FILTER_PATCH_WIDTH; int row = patch_y * FILTER_PATCH_HEIGHT + irow; int col = patch_x * FILTER_PATCH_WIDTH + icol; const int NUM_NMS_AREA = FILTER_PATCH_WIDTH / FILTER_PATCH_HEIGHT; int local_area = icol / FILTER_PATCH_HEIGHT; int local_pos = local_area * (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT) + irow * FILTER_PATCH_HEIGHT + icol % FILTER_PATCH_HEIGHT; uint device_pos = imgId * (WIDTH * HEIGHT) + row * WIDTH + col; __shared__ bool has_candidate[NUM_NMS_AREA]; if (threadIdx.x < NUM_NMS_AREA) has_candidate[threadIdx.x] = false; __syncthreads(); // load data if (row < WIDTH && col < HEIGHT){ data[local_pos] = d_data[device_pos]; idxs[local_pos] = device_pos; } else{ data[local_pos] = 0; } if (data[local_pos] > thr_high) has_candidate[local_area] = true; __syncthreads(); // find maximum local_area = threadIdx.x / (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT); if (!has_candidate[local_area]) return; const int local_tid = threadIdx.x % (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT); const int local_offset = local_area * (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT); int num_of_working_threads = (FILTER_PATCH_HEIGHT * FILTER_PATCH_HEIGHT) / 2; // if (local_tid >= num_of_working_threads) return; int idx_mul = 1; while (num_of_working_threads > 1 && local_tid < num_of_working_threads) { int idx1 = (local_tid * 2) * idx_mul + local_offset; int idx2 = idx1 + idx_mul; int idxm = data[idx1] > data[idx2] ? 
idx1 : idx2; data[idx1] = data[idxm]; idxs[idx1] = idxs[idxm]; __syncthreads(); idx_mul *= 2; num_of_working_threads /= 2; } if (local_tid == 0) { uint write_pos = blockIdx.x * NUM_NMS_AREA + local_area; d_centers[write_pos] = idxs[local_offset]; } } const int PATCH_WIDTH = (2 * HALF_WIDTH + 1); const int FF_LOAD_THREADS_PER_CENTER = 64; const int FF_THREADS_PER_CENTER = 32; const int FF_INFO_THREADS_PER_CENTER = FF_THREADS_PER_CENTER; // const int FF_SIDE_WIDTH = FF_THREADS_PER_CENTER / 4; // const int FF_SIDE_OFFSET = 1 - FF_SIDE_WIDTH / 2; const int FF_THREADS_PER_BLOCK = 64; const int FF_LOAD_PASS = (2 * HALF_WIDTH + 1) * (2 * HALF_WIDTH + 1) / FF_LOAD_THREADS_PER_CENTER + 1; const int FF_CENTERS_PER_BLOCK = FF_THREADS_PER_BLOCK / FF_LOAD_THREADS_PER_CENTER; __device__ void calPreSum(int *preSum) { for(int i = 1; i < FF_INFO_THREADS_PER_CENTER; i*=2){ int prevIdx = threadIdx.x - i; int sum = preSum[threadIdx.x]; if (prevIdx > 0){ sum += preSum[prevIdx]; } __syncthreads(); preSum[threadIdx.x] = sum; __syncthreads(); } } typedef float (*reducer) (const float &, const float &); typedef float (*transformer) (const int &); __device__ float deviceAdd(const float &a, const float &b) {return a+b;} __device__ float deviceMin(const float &a, const float &b) {return a<b?a:b;} __device__ float deviceMax(const float &a, const float &b) {return a>b?a:b;} const int WARP_SIZE = 32; __device__ __inline__ float warpReduce(float val, int npix, reducer r) { int offset = 32; if (npix < 32) { if (npix > 16) offset = 16; else if (npix > 8) offset = 8; else if (npix > 4) offset = 4; else if (npix > 2) offset = 2; else if (npix > 1) offset = 1; else offset = 0; } for(; offset > 0; offset /= 2){ int srcIdx = threadIdx.x + offset; float nVal = __shfl_down(val, offset); if (srcIdx < npix){ val = r(val, nVal); } } return val; } __device__ __inline__ float blockReduce(int npix, transformer t, reducer r) { const int FF_PIX_PASS = (npix + FF_INFO_THREADS_PER_CENTER - 1) / FF_INFO_THREADS_PER_CENTER; __shared__ float buffer[32]; for(int i = 0; i < FF_PIX_PASS; i++){ uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; int n = WARP_SIZE; if (i == FF_PIX_PASS - 1){ n = npix % WARP_SIZE; } float val = warpReduce(t(tmp_id), n, r); if (threadIdx.x == 0){ buffer[i] = val; } } return warpReduce(buffer[threadIdx.x], FF_PIX_PASS, r); } __device__ __inline__ bool inRing(int dr, int dc) { float dist2 = dr * dr + dc * dc; const float lower = r0 * r0; const float upper = (r0 + dr) * (r0 + dr); return dist2 >= lower && dist2 <= upper; } __device__ __inline__ bool peakIsPreSelected(float son, float npix, float amp_max, float amp_tot) { if (son < peak_son_min) return false; if (npix < peak_npix_min) return false; if (npix > peak_npix_max) return false; if (amp_max < peak_amax_thr) return false; if (amp_tot < peak_atot_thr) return false; return true; } // one center per block __global__ void floodFill_v2(const float *d_data, const uint *d_centers, Peak *d_peaks, uint *d_conmap) { const uint center_id = d_centers[blockIdx.x]; const uint img_id = center_id / (WIDTH * HEIGHT); const uint crow = center_id / WIDTH % HEIGHT; const uint ccol = center_id % WIDTH; __shared__ float data[PATCH_WIDTH][PATCH_WIDTH]; __shared__ uint status[PATCH_WIDTH][PATCH_WIDTH]; // load data for (int i = 0; i < FF_LOAD_PASS; i++) { const uint tmp_id = i * FF_LOAD_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; const int drow = crow + irow - HALF_WIDTH; const int dcol = ccol + icol - 
HALF_WIDTH; if (drow >= 0 && drow < HEIGHT && dcol >= 0 && dcol < WIDTH) { data[irow][icol] = d_data[img_id * (WIDTH * HEIGHT) + drow * WIDTH + dcol]; } else if(irow < PATCH_WIDTH) { data[irow][icol] = 0; } } for(int i = 0; i < FF_LOAD_PASS; i++) { const uint tmp_id = i * FF_LOAD_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; if (irow < PATCH_WIDTH){ status[irow][icol] = 0; } if (irow == HALF_WIDTH && icol == HALF_WIDTH){ status[irow][icol] = center_id; } } __syncthreads(); if (threadIdx.x >= FF_THREADS_PER_CENTER) return; // flood fill const int FF_SCAN_LENGTH = FF_THREADS_PER_CENTER / 8; const int sign_x[8] = {-1, 1, 1, -1, 1, 1, -1, -1}; const int sign_y[8] = {1, 1, -1, -1, 1, -1, -1, 1}; const int scanline_id = threadIdx.x / FF_SCAN_LENGTH; const int id_in_grp = threadIdx.x % (2 * FF_SCAN_LENGTH); const int base_v = id_in_grp - FF_SCAN_LENGTH; int icol = base_v * sign_x[scanline_id] + HALF_WIDTH; int irow = base_v * sign_y[scanline_id] + HALF_WIDTH; const int scangrp_id = threadIdx.x / (2 * FF_SCAN_LENGTH); const int dxs[4] = {-1, 1, 0, 0}; const int dys[4] = {0, 0, 1, -1}; const int dx = dxs[scangrp_id]; const int dy = dys[scangrp_id]; const float center_intensity = data[HALF_WIDTH][HALF_WIDTH]; __shared__ bool is_local_maximum; is_local_maximum = true; for(int i = 1; i <= rank; i++){ __syncthreads(); if (!is_local_maximum) return; icol += dx; irow += dy; if (data[irow][icol] > center_intensity){ is_local_maximum = false; } if (data[irow][icol] > thr_low){ if (status[irow-dy][icol-dx] == center_id){ status[irow][icol] = center_id; } } } const int bound = base_v > 0 ? base_v : -base_v; for(int i = 1; i <= FF_SCAN_LENGTH - 1; i++){ __syncthreads(); if (!is_local_maximum) return; if (i > bound) continue; icol += dx; irow += dy; // if (irow >= PATCH_WIDTH || icol >= PATCH_WIDTH || irow < 0 || icol < 0){ // printf("irow:%d, icol:%d, img_id:%d, i:%d\n", irow, icol, img_id, i); // } if (data[irow][icol] > center_intensity){ is_local_maximum = false; } if (data[irow][icol] > thr_low){ if (status[irow-dy][icol-dx] == center_id){ status[irow][icol] = center_id; } } } const int FF_PROC_PASS = (PATCH_WIDTH * PATCH_WIDTH + FF_INFO_THREADS_PER_CENTER - 1) / FF_INFO_THREADS_PER_CENTER; // calculate peak info __shared__ float peak_data[PATCH_WIDTH * PATCH_WIDTH]; __shared__ int peak_row[PATCH_WIDTH * PATCH_WIDTH]; __shared__ int peak_col[PATCH_WIDTH * PATCH_WIDTH]; // data compaction __shared__ int preSum[FF_INFO_THREADS_PER_CENTER]; preSum[threadIdx.x] = 0; for(int i = 0; i < FF_PROC_PASS; i++){ const uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; if (irow < PATCH_WIDTH && status[irow][icol] == center_id){ preSum[threadIdx.x] += 1; } } calPreSum(preSum); int npix = preSum[FF_INFO_THREADS_PER_CENTER - 1]; int counter = 0; __shared__ float bg_avg; __shared__ float bg_rms; __shared__ float bg_npix; if (threadIdx.x == 0){ bg_avg = 0; bg_rms = 0; bg_npix = 0; } for(int i = 0; i < FF_PROC_PASS; i++){ const uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; if (irow < PATCH_WIDTH){ if (status[irow][icol] == center_id){ int pos = counter; if (threadIdx.x > 0) pos += preSum[threadIdx.x - 1]; peak_data[pos] = data[irow][icol]; peak_row[pos] = irow; peak_col[pos] = icol; counter ++; } // calculate background info if (inRing(irow-HALF_WIDTH,icol-HALF_WIDTH) && 
data[irow][icol] < thr_low){ float d = data[irow][icol]; atomicAdd(&bg_avg,d); atomicAdd(&bg_rms,d * d); atomicAdd(&bg_npix, 1); } } } const int FF_PIX_PASS = (npix + FF_INFO_THREADS_PER_CENTER - 1) / FF_INFO_THREADS_PER_CENTER; __shared__ float buffer[32]; #define BLOCK_REDUCE(v,t,r) \ for(int i = 0; i < FF_PIX_PASS; i++){ \ uint tmp_id = i * FF_INFO_THREADS_PER_CENTER + threadIdx.x; \ int n = WARP_SIZE; \ if (i == FF_PIX_PASS - 1){ \ n = npix % WARP_SIZE; \ } \ float val = warpReduce(t(tmp_id), n, r); \ if (threadIdx.x == 0){ \ buffer[i] = val; \ } \ } \ v= warpReduce(buffer[threadIdx.x], FF_PIX_PASS, r); float samp; BLOCK_REDUCE(samp, [=]__device__(const int &id) -> float {return peak_data[id];}, deviceAdd); __shared__ Peak peak; if (threadIdx.x == 0) { bg_avg /= bg_npix; bg_rms = bg_rms / bg_npix - bg_avg * bg_avg; bg_rms = sqrtf(bg_rms); float noise_tot = bg_rms * sqrtf(npix); peak.amp_tot = samp - bg_avg * npix; peak.amp_max = center_intensity - bg_avg; peak.son = noise_tot > 0 ? peak.amp_tot / noise_tot : 0; peak.bkgd = bg_avg; peak.noise = bg_rms; peak.valid = peakIsPreSelected(peak.son, npix, peak.amp_max, peak.amp_tot); } __syncthreads(); if (!peak.valid) return; // if (center_id == 4734401 && threadIdx.x == 0) // printf("sum of intensity:%f,\n", samp); float rmin; BLOCK_REDUCE(rmin, [=]__device__(const int &id) -> float {return peak_row[id];}, deviceMin); float rmax; BLOCK_REDUCE(rmax, [=]__device__(const int &id) -> float {return peak_row[id];}, deviceMax); float cmin; BLOCK_REDUCE(cmin, [=]__device__(const int &id) -> float {return peak_col[id];}, deviceMin); float cmax; BLOCK_REDUCE(cmax, [=]__device__(const int &id) -> float {return peak_col[id];}, deviceMax); float sar1; BLOCK_REDUCE(sar1, [=]__device__(const int &id) -> float {return peak_data[id] * peak_row[id];}, deviceAdd); float sac1; BLOCK_REDUCE(sac1, [=]__device__(const int &id) -> float {return peak_data[id] * peak_col[id];}, deviceAdd); float sar2; BLOCK_REDUCE(sar2, [=]__device__(const int &id) -> float {return peak_data[id] * peak_row[id] * peak_row[id];}, deviceAdd); float sac2; BLOCK_REDUCE(sac2, [=]__device__(const int &id) -> float {return peak_data[id] * peak_col[id] * peak_col[id];}, deviceAdd); if(threadIdx.x == 0){ peak.evt = img_id / SHOTS; peak.seg = img_id % SHOTS; // printf("center_id: %d, evt: %f, seg:%f\n", center_id, peak.evt, peak.seg); peak.row = crow; peak.col = ccol; peak.npix = npix; peak.row_min = rmin; peak.row_max = rmax; peak.col_min = cmin; peak.col_max = cmax; if (samp > 0){ sar1 = sar1 / samp; sac1 = sac1 / samp; sar2 = sar2 / samp - sar1 * sar1; sac2 = sac2 / samp - sac1 * sac1; peak.row_cgrav = sar1; peak.col_cgrav = sac1; peak.row_sigma = (npix > 1 && sar2 > 0) ? sqrtf(sar2) : 0; peak.col_sigma = (npix > 1 && sac2 > 0) ? 
sqrtf(sac2) : 0; } else { peak.row_cgrav = crow; peak.col_cgrav = ccol; peak.row_sigma = 0; peak.col_sigma = 0; } d_peaks[blockIdx.x] = peak; } // printf("center_id:%d\n", center_id); // write data for(int i = 0; i < FF_PROC_PASS; i++){ const uint tmp_id = i * FF_THREADS_PER_CENTER + threadIdx.x; const uint irow = tmp_id / PATCH_WIDTH; const uint icol = tmp_id % PATCH_WIDTH; const int drow = crow + irow - rank; const int dcol = ccol + icol - rank; if (irow < PATCH_WIDTH && status[irow][icol] == center_id && drow >= 0 && drow < HEIGHT && dcol >= 0 && dcol < WIDTH) { // if(img_id == 9) // printf("irow:%d, icol:%d, center_id:%d\n", irow, icol, center_id); d_conmap[img_id * (WIDTH * HEIGHT) + drow * WIDTH + dcol] = status[irow][icol]; } } } struct is_center { __device__ bool operator()(const uint &x){ // return x == addr_conmap[x]; return x > 0; } }; void checkCudaError(cudaError_t err, const char* msg) { if (err != cudaSuccess) { printf("failed: %s\n, error code: %s\n", msg, cudaGetErrorString(err)); } } void getCudaError(const char *msg) { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("failed: %s\n, error code %s\n", msg, cudaGetErrorString(err)); } } void setUpData(float *h_data) { checkCudaError(cudaMalloc((void **)&d_data, LSIZE * sizeof(float)), "cudaMalloc d_data"); checkCudaError(cudaMalloc((void **)&d_conmap, LSIZE * sizeof(uint)), "cudaMalloc d_conmap"); checkCudaError(cudaMemset(d_conmap, 0, sizeof(uint)*LSIZE), "cudaMemset d_conmap"); checkCudaError(cudaMemcpy(d_data, h_data, LSIZE * sizeof(float), cudaMemcpyHostToDevice), "cudaMemcpy h2d"); } void releaseData() { cudaFree(d_data); cudaFree(d_conmap); } extern "C" void processImages(float *data, Peak *&peak_out, int &npeaks, uint *data_out) { float miliseconds = 0.0f; cudaEvent_t t0, t1; cudaEventCreate(&t0); cudaEventCreate(&t1); cudaEventRecord(t0); setUpData(data); cudaEventRecord(t1); cudaEventSynchronize(t1); cudaEventElapsedTime(&miliseconds, t0, t1); printf("passing data to gpu takes %f miliseconds\n", miliseconds); // floodFill v2 printf("filterByThrHigh_v2: num_blocks:%ld\n", FILTER_PATCH_PER_IMAGE * EVENTS * SHOTS); const int centers_size = FILTER_PATCH_PER_IMAGE * (FILTER_PATCH_WIDTH / FILTER_PATCH_HEIGHT) * EVENTS * SHOTS; checkCudaError(cudaMalloc((void **)&d_centers, centers_size * sizeof(uint)), "cudaMalloc d_centers"); checkCudaError(cudaMemset(d_centers, 0, centers_size * sizeof(uint)), "cudaMemset d_centers"); checkCudaError(cudaMalloc((void **)&d_dense_centers, centers_size * sizeof(uint)), "cudaMalloc d_dense_centers"); cudaDeviceSynchronize(); cudaEventRecord(t0); filterByThrHigh_v2<<<FILTER_PATCH_PER_IMAGE * EVENTS * SHOTS, FILTER_THREADS_PER_PATCH>>>(d_data, d_centers); getCudaError("filterByThrHigh_v2"); cudaEventRecord(t1); cudaEventSynchronize(t1); cudaEventElapsedTime(&miliseconds, t0, t1); printf("filterByThrHigh_v2 takes %f miliseconds\n", miliseconds); cudaEventRecord(t0); thrust::device_ptr<uint> dp_dense_centers = thrust::device_pointer_cast(d_dense_centers); thrust::device_ptr<uint> dp_centers = thrust::device_pointer_cast(d_centers); auto end_centers = thrust::copy_if(dp_centers, dp_centers + centers_size, dp_dense_centers, is_center()); int num_pix = end_centers - dp_dense_centers; printf("num of testing pixels:%d\n", num_pix); cudaEventRecord(t1); cudaEventSynchronize(t1); cudaEventElapsedTime(&miliseconds, t0, t1); printf("stream compaction takes %f miliseconds\n", miliseconds); const int NUM_BLOCKS = num_pix / FF_CENTERS_PER_BLOCK; npeaks = num_pix; cudaEventRecord(t0); 
checkCudaError(cudaMalloc((void **)&d_peaks, num_pix * sizeof(Peak)), "cudaMalloc d_peaks"); checkCudaError(cudaMemset(d_peaks, 0, num_pix * sizeof(Peak)), "cudaMemset d_peaks"); floodFill_v2<<<NUM_BLOCKS, FF_THREADS_PER_BLOCK>>>(d_data, d_dense_centers, d_peaks, d_conmap); cudaEventRecord(t1); cudaEventSynchronize(t1); cudaEventElapsedTime(&miliseconds, t0, t1); printf("floodFill_v2 takes %f miliseconds\n", miliseconds); getCudaError("floodFill_v2"); peak_out = new Peak[num_pix]; cudaEventRecord(t0); checkCudaError(cudaMemcpy(peak_out, d_peaks, num_pix * sizeof(Peak), cudaMemcpyDeviceToHost), "cudaMemcpy d2h"); cudaEventRecord(t1); cudaEventSynchronize(t1); cudaEventElapsedTime(&miliseconds, t0, t1); printf("copying peaks to cpu takes %f miliseconds\n", miliseconds); if (data_out != NULL) { checkCudaError(cudaMemcpy(data_out, d_conmap, LSIZE * sizeof(uint), cudaMemcpyDeviceToHost), "cudaMemcpy d2h"); } releaseData(); }
40be974718b97f2c6f316d5ac4d9960898004cdd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void SoftMaxLossFloat(
    float *input,      // input from the softmax function
    float *output,     // output gradient going back through to the softmax function
    int size,          // number of classes
    int classlocation, // index of the correct class
    float *loss)
{
    if (threadIdx.x < size) {
        if (threadIdx.x == classlocation) {
            // dL/dx = p - 1 for the true class; cross-entropy loss = -log(p)
            output[threadIdx.x] = input[threadIdx.x] - 1.0f;
            *loss = -__logf(input[threadIdx.x]);
        } else {
            output[threadIdx.x] = input[threadIdx.x];
        }
    }
}
40be974718b97f2c6f316d5ac4d9960898004cdd.cu
extern "C"__global__ void SoftMaxLossFloat( float *input, // input sfrom the soft max function float *output, // output going back through to the softmax function int size, // int classlocation, float *loss){ if (threadIdx.x<size){ if (threadIdx.x==classlocation){ output[threadIdx.x] =input[threadIdx.x]-1.0 *loss= -__logf(input[threadIdx.x]) }else{ output[threadIdx.x]=input[threadIdx.x] } } }
0e1c4860cc3a9433bc23d146c50c92efcc8e9313.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// @file fdwt53.cu /// @brief CUDA implementation of forward 5/3 2D DWT. /// @author Martin Jirman ([email protected]) /// @date 2011-02-04 13:23 /// /// /// Copyright (c) 2011 Martin Jirman /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE /// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE /// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR /// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF /// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN /// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) /// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. /// #include "common.h" #include "transform_buffer.h" #include "io.h" namespace dwt_cuda { /// Wraps buffer and methods needed for computing one level of 5/3 FDWT /// using sliding window approach. /// @tparam WIN_SIZE_X width of sliding window /// @tparam WIN_SIZE_Y height of sliding window template <int WIN_SIZE_X, int WIN_SIZE_Y> class FDWT53 { private: /// Info needed for processing of one input column. /// @tparam CHECKED_LOADER true if column's loader should check boundaries /// false if there are no near boudnaries to check template <bool CHECKED_LOADER> struct FDWT53Column { /// loader for the column VerticalDWTPixelLoader<int, CHECKED_LOADER> loader; /// offset of the column in shared buffer int offset; // backup of first 3 loaded pixels (not transformed) int pixel0, pixel1, pixel2; /// Sets all fields to anything to prevent 'uninitialized' warnings. __device__ void clear() { offset = pixel0 = pixel1 = pixel2 = 0; loader.clear(); } }; /// Type of shared memory buffer for 5/3 FDWT transforms. typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> FDWT53Buffer; /// Actual shared buffer used for forward 5/3 DWT. FDWT53Buffer buffer; /// Difference between indices of two vertical neighbors in buffer. enum { STRIDE = FDWT53Buffer::VERTICAL_STRIDE }; /// Forward 5/3 DWT predict operation. struct Forward53Predict { __device__ void operator() (const int p, int & c, const int n) const { // c = n; c -= (p + n) / 2; // F.8, page 126, ITU-T Rec. T.800 final draft the real one } }; /// Forward 5/3 DWT update operation. struct Forward53Update { __device__ void operator() (const int p, int & c, const int n) const { c += (p + n + 2) / 4; // F.9, page 126, ITU-T Rec. T.800 final draft } }; /// Initializes one column: computes offset of the column in shared memory /// buffer, initializes loader and finally uses it to load first 3 pixels. 
/// @tparam CHECKED true if loader of the column checks boundaries /// @param column (uninitialized) column info to be initialized /// @param input input image /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param colIndex x-axis coordinate of the column (relative to the left /// side of this threadblock's block of input pixels) /// @param firstY y-axis coordinate of first image row to be transformed template <bool CHECKED> __device__ void initColumn(FDWT53Column<CHECKED> & column, const int * const input, const int sizeX, const int sizeY, const int colIndex, const int firstY) { // get offset of the column with index 'cId' column.offset = buffer.getColumnOffset(colIndex); // coordinates of the first pixel to be loaded const int firstX = blockIdx.x * WIN_SIZE_X + colIndex; if(blockIdx.y == 0) { // topmost block - apply mirroring rules when loading first 3 rows column.loader.init(sizeX, sizeY, firstX, firstY); // load pixels in mirrored way column.pixel2 = column.loader.loadFrom(input); // loaded pixel #0 column.pixel1 = column.loader.loadFrom(input); // loaded pixel #1 column.pixel0 = column.loader.loadFrom(input); // loaded pixel #2 // reinitialize loader to start with pixel #1 again column.loader.init(sizeX, sizeY, firstX, firstY + 1); } else { // non-topmost row - regular loading: column.loader.init(sizeX, sizeY, firstX, firstY - 2); // load 3 rows into the column column.pixel0 = column.loader.loadFrom(input); column.pixel1 = column.loader.loadFrom(input); column.pixel2 = column.loader.loadFrom(input); // Now, the next pixel, which will be loaded by loader, is pixel #1. } } /// Loads and vertically transforms given column. Assumes that first 3 /// pixels are already loaded in column fields pixel0 ... pixel2. /// @tparam CHECKED true if loader of the column checks boundaries /// @param column column to be loaded and vertically transformed /// @param input pointer to input image data template <bool CHECKED> __device__ void loadAndVerticallyTransform(FDWT53Column<CHECKED> & column, const int * const input) { // take 3 loaded pixels and put them into shared memory transform buffer buffer[column.offset + 0 * STRIDE] = column.pixel0; buffer[column.offset + 1 * STRIDE] = column.pixel1; buffer[column.offset + 2 * STRIDE] = column.pixel2; // load remaining pixels to be able to vertically transform the window for(int i = 3; i < (3 + WIN_SIZE_Y); i++) { buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); } // remember last 3 pixels for use in next iteration column.pixel0 = buffer[column.offset + (WIN_SIZE_Y + 0) * STRIDE]; column.pixel1 = buffer[column.offset + (WIN_SIZE_Y + 1) * STRIDE]; column.pixel2 = buffer[column.offset + (WIN_SIZE_Y + 2) * STRIDE]; // vertically transform the column in transform buffer buffer.forEachVerticalOdd(column.offset, Forward53Predict()); buffer.forEachVerticalEven(column.offset, Forward53Update()); } /// Actual implementation of 5/3 FDWT. 
/// @tparam CHECK_LOADS true if input loader must check boundaries /// @tparam CHECK_WRITES true if output writer must check boundaries /// @param in input image /// @param out output buffer /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param winSteps number of sliding window steps template <bool CHECK_LOADS, bool CHECK_WRITES> __device__ void transform(const int * const in, int * const out, const int sizeX, const int sizeY, const int winSteps) { // info about one main and one boundary columns processed by this thread FDWT53Column<CHECK_LOADS> column; FDWT53Column<CHECK_LOADS> boundaryColumn; // only few threads use this // Initialize all column info: initialize loaders, compute offset of // column in shared buffer and initialize loader of column. const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps; initColumn(column, in, sizeX, sizeY, threadIdx.x, firstY); //has been checked Mar 9th // first 3 threads initialize boundary columns, others do not use them boundaryColumn.clear(); if(threadIdx.x < 3) { // index of boundary column (relative x-axis coordinate of the column) const int colId = threadIdx.x + ((threadIdx.x == 0) ? WIN_SIZE_X : -3); // initialize the column initColumn(boundaryColumn, in, sizeX, sizeY, colId, firstY); } // index of column which will be written into output by this thread const int outColumnIndex = parityIdx<WIN_SIZE_X>(); // offset of column which will be written by this thread into output const int outColumnOffset = buffer.getColumnOffset(outColumnIndex); // initialize output writer for this thread const int outputFirstX = blockIdx.x * WIN_SIZE_X + outColumnIndex; VerticalDWTBandWriter<int, CHECK_WRITES> writer; writer.init(sizeX, sizeY, outputFirstX, firstY); // Sliding window iterations: // Each iteration assumes that first 3 pixels of each column are loaded. for(int w = 0; w < winSteps; w++) { // For each column (including boundary columns): load and vertically // transform another WIN_SIZE_Y lines. loadAndVerticallyTransform(column, in); if(threadIdx.x < 3) { loadAndVerticallyTransform(boundaryColumn, in); } // wait for all columns to be vertically transformed and transform all // output rows horizontally __syncthreads(); buffer.forEachHorizontalOdd(2, WIN_SIZE_Y, Forward53Predict()); __syncthreads(); buffer.forEachHorizontalEven(2, WIN_SIZE_Y, Forward53Update()); // wait for all output rows to be transformed horizontally and write // them into output buffer __syncthreads(); for(int r = 2; r < (2 + WIN_SIZE_Y); r += 2) { // Write low coefficients from output column into low band ... writer.writeLowInto(out, buffer[outColumnOffset + r * STRIDE]); // ... and high coeficients into the high band. writer.writeHighInto(out, buffer[outColumnOffset + (r+1) * STRIDE]); } // before proceeding to next iteration, wait for all output columns // to be written into the output __syncthreads(); } } public: /// Determines, whether this block's pixels touch boundary and selects /// right version of algorithm according to it - for many threadblocks, it /// selects version which does not deal with boundary mirroring and thus is /// slightly faster. 
/// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image /// @param steps number of sliding window steps __device__ static void run(const int * const in, int * const out, const int sx, const int sy, const int steps) { // if(blockIdx.x==0 && blockIdx.y ==11 && threadIdx.x >=0&&threadIdx.x <64){ // object with transform buffer in shared memory __shared__ FDWT53<WIN_SIZE_X, WIN_SIZE_Y> fdwt53; // Compute limits of this threadblock's block of pixels and use them to // determine, whether this threadblock will have to deal with boundary. // (1 in next expressions is for radius of impulse response of 9/7 FDWT.) const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1; const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1; const bool atRightBoudary = maxX >= sx; const bool atBottomBoudary = maxY >= sy; // Select specialized version of code according to distance of this // threadblock's pixels from image boundary. if(atBottomBoudary) { // near bottom boundary => check both writing and reading fdwt53.transform<true, true>(in, out, sx, sy, steps); } else if(atRightBoudary) { // near right boundary only => check writing only fdwt53.transform<false, true>(in, out, sx, sy, steps); } else { // no nearby boundary => check nothing fdwt53.transform<false, false>(in, out, sx, sy, steps); } } // } }; // end of class FDWT53 /// Main GPU 5/3 FDWT entry point. /// @tparam WIN_SX width of sliding window to be used /// @tparam WIN_SY height of sliding window to be used /// @param input input image /// @param output output buffer /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param winSteps number of sliding window steps template <int WIN_SX, int WIN_SY> __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT53<WIN_SX, WIN_SY>), 8)) __global__ void fdwt53Kernel(const int * const input, int * const output, const int sizeX, const int sizeY, const int winSteps) { FDWT53<WIN_SX, WIN_SY>::run(input, output, sizeX, sizeY, winSteps); } /// Only computes optimal number of sliding window steps, /// number of threadblocks and then lanches the 5/3 FDWT kernel. /// @tparam WIN_SX width of sliding window /// @tparam WIN_SY height of sliding window /// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image template <int WIN_SX, int WIN_SY> void launchFDWT53Kernel (int * in, int * out, int sx, int sy) { // compute optimal number of steps of each sliding window const int steps = divRndUp(sy, 15 * WIN_SY); int gx = divRndUp(sx, WIN_SX); int gy = divRndUp(sy, WIN_SY * steps); printf("\n sliding steps = %d , gx = %d , gy = %d \n", steps, gx, gy); // prepare grid size dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); // printf("\n globalx=%d, globaly=%d, blocksize=%d\n", gSize.x, gSize.y, WIN_SX); // run kernel, possibly measure time and finally check the call // PERF_BEGIN for(int iter = 0; iter < 500000; iter++){ hipLaunchKernelGGL(( fdwt53Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps); } // PERF_END(" FDWT53", sx, sy) // CudaDWTTester::checkLastKernelCall("FDWT 5/3 kernel"); printf("fdwt53Kernel in launchFDWT53Kernel has finished"); } /// Forward 5/3 2D DWT. See common rules (above) for more details. /// @param in Expected to be normalized into range [-128, 127]. /// Will not be preserved (will be overwritten). 
/// @param out output buffer on GPU /// @param sizeX width of input image (in pixels) /// @param sizeY height of input image (in pixels) /// @param levels number of recursive DWT levels void fdwt53(int * in, int * out, int sizeX, int sizeY, int levels) { // select right width of kernel for the size of the image if(sizeX >= 960) { launchFDWT53Kernel<192, 8>(in, out, sizeX, sizeY); } else if (sizeX >= 480) { launchFDWT53Kernel<128, 8>(in, out, sizeX, sizeY); } else { launchFDWT53Kernel<64, 8>(in, out, sizeX, sizeY); } // if this was not the last level, continue recursively with other levels if(levels > 1) { // copy output's LL band back into input buffer const int llSizeX = divRndUp(sizeX, 2); const int llSizeY = divRndUp(sizeY, 2); // printf("\n llSizeX = %d , llSizeY = %d \n", llSizeX, llSizeY); memCopy(in, out, llSizeX, llSizeY); //the function memCopy in cuda_dwt/common.h line 238 // run remaining levels of FDWT fdwt53(in, out, llSizeX, llSizeY, levels - 1); } } } // end of namespace dwt_cuda
0e1c4860cc3a9433bc23d146c50c92efcc8e9313.cu
/// @file fdwt53.cu /// @brief CUDA implementation of forward 5/3 2D DWT. /// @author Martin Jirman ([email protected]) /// @date 2011-02-04 13:23 /// /// /// Copyright (c) 2011 Martin Jirman /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE /// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE /// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR /// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF /// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN /// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) /// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. /// #include "common.h" #include "transform_buffer.h" #include "io.h" namespace dwt_cuda { /// Wraps buffer and methods needed for computing one level of 5/3 FDWT /// using sliding window approach. /// @tparam WIN_SIZE_X width of sliding window /// @tparam WIN_SIZE_Y height of sliding window template <int WIN_SIZE_X, int WIN_SIZE_Y> class FDWT53 { private: /// Info needed for processing of one input column. /// @tparam CHECKED_LOADER true if column's loader should check boundaries /// false if there are no near boudnaries to check template <bool CHECKED_LOADER> struct FDWT53Column { /// loader for the column VerticalDWTPixelLoader<int, CHECKED_LOADER> loader; /// offset of the column in shared buffer int offset; // backup of first 3 loaded pixels (not transformed) int pixel0, pixel1, pixel2; /// Sets all fields to anything to prevent 'uninitialized' warnings. __device__ void clear() { offset = pixel0 = pixel1 = pixel2 = 0; loader.clear(); } }; /// Type of shared memory buffer for 5/3 FDWT transforms. typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> FDWT53Buffer; /// Actual shared buffer used for forward 5/3 DWT. FDWT53Buffer buffer; /// Difference between indices of two vertical neighbors in buffer. enum { STRIDE = FDWT53Buffer::VERTICAL_STRIDE }; /// Forward 5/3 DWT predict operation. struct Forward53Predict { __device__ void operator() (const int p, int & c, const int n) const { // c = n; c -= (p + n) / 2; // F.8, page 126, ITU-T Rec. T.800 final draft the real one } }; /// Forward 5/3 DWT update operation. struct Forward53Update { __device__ void operator() (const int p, int & c, const int n) const { c += (p + n + 2) / 4; // F.9, page 126, ITU-T Rec. T.800 final draft } }; /// Initializes one column: computes offset of the column in shared memory /// buffer, initializes loader and finally uses it to load first 3 pixels. 
/// @tparam CHECKED true if loader of the column checks boundaries /// @param column (uninitialized) column info to be initialized /// @param input input image /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param colIndex x-axis coordinate of the column (relative to the left /// side of this threadblock's block of input pixels) /// @param firstY y-axis coordinate of first image row to be transformed template <bool CHECKED> __device__ void initColumn(FDWT53Column<CHECKED> & column, const int * const input, const int sizeX, const int sizeY, const int colIndex, const int firstY) { // get offset of the column with index 'cId' column.offset = buffer.getColumnOffset(colIndex); // coordinates of the first pixel to be loaded const int firstX = blockIdx.x * WIN_SIZE_X + colIndex; if(blockIdx.y == 0) { // topmost block - apply mirroring rules when loading first 3 rows column.loader.init(sizeX, sizeY, firstX, firstY); // load pixels in mirrored way column.pixel2 = column.loader.loadFrom(input); // loaded pixel #0 column.pixel1 = column.loader.loadFrom(input); // loaded pixel #1 column.pixel0 = column.loader.loadFrom(input); // loaded pixel #2 // reinitialize loader to start with pixel #1 again column.loader.init(sizeX, sizeY, firstX, firstY + 1); } else { // non-topmost row - regular loading: column.loader.init(sizeX, sizeY, firstX, firstY - 2); // load 3 rows into the column column.pixel0 = column.loader.loadFrom(input); column.pixel1 = column.loader.loadFrom(input); column.pixel2 = column.loader.loadFrom(input); // Now, the next pixel, which will be loaded by loader, is pixel #1. } } /// Loads and vertically transforms given column. Assumes that first 3 /// pixels are already loaded in column fields pixel0 ... pixel2. /// @tparam CHECKED true if loader of the column checks boundaries /// @param column column to be loaded and vertically transformed /// @param input pointer to input image data template <bool CHECKED> __device__ void loadAndVerticallyTransform(FDWT53Column<CHECKED> & column, const int * const input) { // take 3 loaded pixels and put them into shared memory transform buffer buffer[column.offset + 0 * STRIDE] = column.pixel0; buffer[column.offset + 1 * STRIDE] = column.pixel1; buffer[column.offset + 2 * STRIDE] = column.pixel2; // load remaining pixels to be able to vertically transform the window for(int i = 3; i < (3 + WIN_SIZE_Y); i++) { buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); } // remember last 3 pixels for use in next iteration column.pixel0 = buffer[column.offset + (WIN_SIZE_Y + 0) * STRIDE]; column.pixel1 = buffer[column.offset + (WIN_SIZE_Y + 1) * STRIDE]; column.pixel2 = buffer[column.offset + (WIN_SIZE_Y + 2) * STRIDE]; // vertically transform the column in transform buffer buffer.forEachVerticalOdd(column.offset, Forward53Predict()); buffer.forEachVerticalEven(column.offset, Forward53Update()); } /// Actual implementation of 5/3 FDWT. 
/// @tparam CHECK_LOADS true if input loader must check boundaries /// @tparam CHECK_WRITES true if output writer must check boundaries /// @param in input image /// @param out output buffer /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param winSteps number of sliding window steps template <bool CHECK_LOADS, bool CHECK_WRITES> __device__ void transform(const int * const in, int * const out, const int sizeX, const int sizeY, const int winSteps) { // info about one main and one boundary columns processed by this thread FDWT53Column<CHECK_LOADS> column; FDWT53Column<CHECK_LOADS> boundaryColumn; // only few threads use this // Initialize all column info: initialize loaders, compute offset of // column in shared buffer and initialize loader of column. const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps; initColumn(column, in, sizeX, sizeY, threadIdx.x, firstY); //has been checked Mar 9th // first 3 threads initialize boundary columns, others do not use them boundaryColumn.clear(); if(threadIdx.x < 3) { // index of boundary column (relative x-axis coordinate of the column) const int colId = threadIdx.x + ((threadIdx.x == 0) ? WIN_SIZE_X : -3); // initialize the column initColumn(boundaryColumn, in, sizeX, sizeY, colId, firstY); } // index of column which will be written into output by this thread const int outColumnIndex = parityIdx<WIN_SIZE_X>(); // offset of column which will be written by this thread into output const int outColumnOffset = buffer.getColumnOffset(outColumnIndex); // initialize output writer for this thread const int outputFirstX = blockIdx.x * WIN_SIZE_X + outColumnIndex; VerticalDWTBandWriter<int, CHECK_WRITES> writer; writer.init(sizeX, sizeY, outputFirstX, firstY); // Sliding window iterations: // Each iteration assumes that first 3 pixels of each column are loaded. for(int w = 0; w < winSteps; w++) { // For each column (including boundary columns): load and vertically // transform another WIN_SIZE_Y lines. loadAndVerticallyTransform(column, in); if(threadIdx.x < 3) { loadAndVerticallyTransform(boundaryColumn, in); } // wait for all columns to be vertically transformed and transform all // output rows horizontally __syncthreads(); buffer.forEachHorizontalOdd(2, WIN_SIZE_Y, Forward53Predict()); __syncthreads(); buffer.forEachHorizontalEven(2, WIN_SIZE_Y, Forward53Update()); // wait for all output rows to be transformed horizontally and write // them into output buffer __syncthreads(); for(int r = 2; r < (2 + WIN_SIZE_Y); r += 2) { // Write low coefficients from output column into low band ... writer.writeLowInto(out, buffer[outColumnOffset + r * STRIDE]); // ... and high coeficients into the high band. writer.writeHighInto(out, buffer[outColumnOffset + (r+1) * STRIDE]); } // before proceeding to next iteration, wait for all output columns // to be written into the output __syncthreads(); } } public: /// Determines, whether this block's pixels touch boundary and selects /// right version of algorithm according to it - for many threadblocks, it /// selects version which does not deal with boundary mirroring and thus is /// slightly faster. 
/// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image /// @param steps number of sliding window steps __device__ static void run(const int * const in, int * const out, const int sx, const int sy, const int steps) { // if(blockIdx.x==0 && blockIdx.y ==11 && threadIdx.x >=0&&threadIdx.x <64){ // object with transform buffer in shared memory __shared__ FDWT53<WIN_SIZE_X, WIN_SIZE_Y> fdwt53; // Compute limits of this threadblock's block of pixels and use them to // determine, whether this threadblock will have to deal with boundary. // (1 in next expressions is for radius of impulse response of 9/7 FDWT.) const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1; const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1; const bool atRightBoudary = maxX >= sx; const bool atBottomBoudary = maxY >= sy; // Select specialized version of code according to distance of this // threadblock's pixels from image boundary. if(atBottomBoudary) { // near bottom boundary => check both writing and reading fdwt53.transform<true, true>(in, out, sx, sy, steps); } else if(atRightBoudary) { // near right boundary only => check writing only fdwt53.transform<false, true>(in, out, sx, sy, steps); } else { // no nearby boundary => check nothing fdwt53.transform<false, false>(in, out, sx, sy, steps); } } // } }; // end of class FDWT53 /// Main GPU 5/3 FDWT entry point. /// @tparam WIN_SX width of sliding window to be used /// @tparam WIN_SY height of sliding window to be used /// @param input input image /// @param output output buffer /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param winSteps number of sliding window steps template <int WIN_SX, int WIN_SY> __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT53<WIN_SX, WIN_SY>), 8)) __global__ void fdwt53Kernel(const int * const input, int * const output, const int sizeX, const int sizeY, const int winSteps) { FDWT53<WIN_SX, WIN_SY>::run(input, output, sizeX, sizeY, winSteps); } /// Only computes optimal number of sliding window steps, /// number of threadblocks and then lanches the 5/3 FDWT kernel. /// @tparam WIN_SX width of sliding window /// @tparam WIN_SY height of sliding window /// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image template <int WIN_SX, int WIN_SY> void launchFDWT53Kernel (int * in, int * out, int sx, int sy) { // compute optimal number of steps of each sliding window const int steps = divRndUp(sy, 15 * WIN_SY); int gx = divRndUp(sx, WIN_SX); int gy = divRndUp(sy, WIN_SY * steps); printf("\n sliding steps = %d , gx = %d , gy = %d \n", steps, gx, gy); // prepare grid size dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); // printf("\n globalx=%d, globaly=%d, blocksize=%d\n", gSize.x, gSize.y, WIN_SX); // run kernel, possibly measure time and finally check the call // PERF_BEGIN for(int iter = 0; iter < 500000; iter++){ fdwt53Kernel<WIN_SX, WIN_SY><<<gSize, WIN_SX>>>(in, out, sx, sy, steps); } // PERF_END(" FDWT53", sx, sy) // CudaDWTTester::checkLastKernelCall("FDWT 5/3 kernel"); printf("fdwt53Kernel in launchFDWT53Kernel has finished"); } /// Forward 5/3 2D DWT. See common rules (above) for more details. /// @param in Expected to be normalized into range [-128, 127]. /// Will not be preserved (will be overwritten). 
/// @param out output buffer on GPU /// @param sizeX width of input image (in pixels) /// @param sizeY height of input image (in pixels) /// @param levels number of recursive DWT levels void fdwt53(int * in, int * out, int sizeX, int sizeY, int levels) { // select right width of kernel for the size of the image if(sizeX >= 960) { launchFDWT53Kernel<192, 8>(in, out, sizeX, sizeY); } else if (sizeX >= 480) { launchFDWT53Kernel<128, 8>(in, out, sizeX, sizeY); } else { launchFDWT53Kernel<64, 8>(in, out, sizeX, sizeY); } // if this was not the last level, continue recursively with other levels if(levels > 1) { // copy output's LL band back into input buffer const int llSizeX = divRndUp(sizeX, 2); const int llSizeY = divRndUp(sizeY, 2); // printf("\n llSizeX = %d , llSizeY = %d \n", llSizeX, llSizeY); memCopy(in, out, llSizeX, llSizeY); //the function memCopy in cuda_dwt/common.h line 238 // run remaining levels of FDWT fdwt53(in, out, llSizeX, llSizeY, levels - 1); } } } // end of namespace dwt_cuda
8901666a036610d357390e9ef7c43b866076c2cf.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gauss_elimination_cuda_new.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *a_d = NULL;
            hipMalloc(&a_d, XSIZE * YSIZE);
            float *b_d = NULL;
            hipMalloc(&b_d, XSIZE * YSIZE);
            int size = XSIZE * YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(gauss_elimination_cuda_new, dim3(gridBlock), dim3(threadBlock), 0, 0, a_d, b_d, size);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(gauss_elimination_cuda_new, dim3(gridBlock), dim3(threadBlock), 0, 0, a_d, b_d, size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(gauss_elimination_cuda_new, dim3(gridBlock), dim3(threadBlock), 0, 0, a_d, b_d, size);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
8901666a036610d357390e9ef7c43b866076c2cf.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gauss_elimination_cuda_new.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *a_d = NULL;
            cudaMalloc(&a_d, XSIZE * YSIZE);
            float *b_d = NULL;
            cudaMalloc(&b_d, XSIZE * YSIZE);
            int size = XSIZE * YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            gauss_elimination_cuda_new<<<gridBlock,threadBlock>>>(a_d, b_d, size);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gauss_elimination_cuda_new<<<gridBlock,threadBlock>>>(a_d, b_d, size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gauss_elimination_cuda_new<<<gridBlock,threadBlock>>>(a_d, b_d, size);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
c79de6d943e2b0ef1144c968b97c67c5bea7c819.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> #include <vector> using namespace std; const int GPUs[] = {0,1,2}; // If left blank all available GPUs will be used. vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int)); void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<hipEvent_t> &start, vector<hipEvent_t> &stop, hipStream_t stream[]) { for (int i=0; i<g.size(); i++) { hipSetDevice(g[i]); for (int j=0; j<g.size(); j++) { int access; hipMalloc(&buffer_s[i], size); hipMalloc(&buffer_d[i], size); hipEventCreate(&start[i]); hipEventCreate(&stop[i]); hipStreamCreate(&stream[i*g.size()+j]); if (i!=j) { hipDeviceCanAccessPeer(&access, g[i], g[j]); if (access) { hipSetDevice(g[i]); hipDeviceEnablePeerAccess(g[j], 0); hipDeviceSynchronize(); hipSetDevice(g[j]); hipDeviceEnablePeerAccess(g[i], 0); hipDeviceSynchronize(); } } } } } void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<hipEvent_t> &start, vector<hipEvent_t> &stop, hipStream_t stream[]) { float time_taken[g.size()*g.size()], bw[g.size()*g.size()]; printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n"); configure(size, buffer_s, buffer_d, start, stop, stream); for (int i=0; i<g.size(); i++) { for (int j=0; j<g.size(); j++) { if (i!=j) { printf("Copying from %d to %d\n", g[i], g[j]); hipEventRecord(start[i]); hipMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size); hipEventRecord(stop[i]); hipEventSynchronize(stop[i]); hipDeviceSynchronize(); float time_ms; hipEventElapsedTime(&time_ms,start[i],stop[i]); time_taken[i*g.size()+j] = time_ms*1e3; bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30); } } } printf("Time(ms) spent in memcpy\n"); printf(" D\\D"); for (int j=0; j<g.size(); j++) printf("%10d ", g[j]); printf("\n"); for (int i=0; i<g.size(); i++) { printf("%6d", g[i]); for (int j=0; j<g.size(); j++) { if (i==j) printf("%12.2f", 0.0); else printf("%12.2f", time_taken[i*g.size()+j]); } printf("\n"); } printf("bandwidth(Gbps) utilized during memcpy\n"); printf(" D\\D"); for (int j=0; j<g.size(); j++) printf("%10d ", g[j]); printf("\n"); for (int i=0; i<g.size(); i++) { printf("%6d", g[i]); for (int j=0; j<g.size(); j++) if (i==j) printf("%12.2f", 0.0); else printf("%12.2f", bw[i*g.size()+j]); printf("\n"); } } void cyclic_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<hipEvent_t> &start, vector<hipEvent_t> &stop, hipStream_t stream[]) { float bw[g.size()], time_taken[g.size()]; printf("\nCyclic Memory Transfers: 0->1->2->3...n->0\n"); configure(size, buffer_s, buffer_d, start, stop, stream); for (int i=0; i<g.size(); i++) { hipEventRecord(start[i], stream[i]); hipMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[(i+1)%g.size()], g[(i+1)%g.size()], size, stream[i]); hipEventRecord(stop[i], stream[i]); } for (int i=0; i<g.size(); i++) { hipEventSynchronize(stop[i]); float time_ms; hipEventElapsedTime(&time_ms,start[i],stop[i]); time_taken[i] = time_ms*1e3; bw[i] = (float)size*1000/time_ms/(1<<30); } printf("\nTime spent in memcpy\n"); for (int i=0; i<g.size(); i++) printf("GPU%d -> GPU%d: %3.5f\n", g[i], g[(i+1)%g.size()], time_taken[i]); printf("\nBandwidth(Gbps) utilized in memcpy\n"); for (int i=0; i<g.size(); i++) printf("GPU%d -> GPU%d: %3.5f\n", g[i], g[(i+1)%g.size()], bw[i]); } void burst_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<hipEvent_t> &start, vector<hipEvent_t> &stop, hipStream_t stream[]) { float bw[g.size()], 
time_taken[g.size()]; printf("\nBurst copy: Every GPU is memcpy-ing to every other GPU\n"); printf("%4d%8d%12s\n%4s%8s%12s\n%4s%8s%12s\n", 1, 2,"n","^","^", "^","|","|","|"); printf("3<-0->2 4<-1->3 ... %s<-%s->%s\n","q", "m", "p"); printf("%4s%8s%12s\n%4s%8s%12s\n%4d%8d%12s\n\n", "|","|","|","v","v", "v",4,0,"r"); configure(size, buffer_s, buffer_d, start, stop, stream); for (int i=0; i<g.size(); i++) { hipEventRecord(start[i]); for (int j=0; j<g.size(); j++) if (i!=j) hipMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size, stream[i*g.size()+j]); hipEventRecord(stop[i]); } for (int i=0; i<g.size(); i++) { hipEventSynchronize(stop[i]); float time_ms; hipEventElapsedTime(&time_ms,start[i],stop[i]); time_taken[i] = time_ms*1e3; bw[i] = (float)size*1000/time_ms/(1<<30); } printf("\t\tTime(ms)\tBandwidth(Gbps)\n"); for (int i=0; i<g.size(); i++) printf("GPU%d\t\t%6.2f\t\t%6.2f\n",g[i], time_taken[i], bw[i]); } void perf_analyze(size_t size) { vector<int*> buffer_s(g.size()); vector<int*> buffer_d(g.size()); vector<hipEvent_t> start(g.size()); vector<hipEvent_t> stop(g.size()); hipStream_t stream[g.size() * g.size()]; configure(size, buffer_s, buffer_d, start, stop, stream); // Blocked blocked_copy(size, buffer_s, buffer_d, start, stop, stream); // Cyclic cyclic_copy(size, buffer_s, buffer_d, start, stop, stream); // Burst burst_copy(size, buffer_s, buffer_d, start, stop, stream); } int main(int argc, char** argv) { // NVLink D<->D performance size_t size = (1<<30); if (!g.size()) { int n; printf("Using all 8 GPUs\n"); hipGetDeviceCount(&n); for (int i=0; i<n; i++) g.push_back(i); } //define size perf_analyze(size); return 0; }
c79de6d943e2b0ef1144c968b97c67c5bea7c819.cu
#include <stdio.h> #include <cuda_runtime.h> #include <time.h> #include <vector> using namespace std; const int GPUs[] = {0,1,2}; // If left blank all available GPUs will be used. vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int)); void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop, cudaStream_t stream[]) { for (int i=0; i<g.size(); i++) { cudaSetDevice(g[i]); for (int j=0; j<g.size(); j++) { int access; cudaMalloc(&buffer_s[i], size); cudaMalloc(&buffer_d[i], size); cudaEventCreate(&start[i]); cudaEventCreate(&stop[i]); cudaStreamCreate(&stream[i*g.size()+j]); if (i!=j) { cudaDeviceCanAccessPeer(&access, g[i], g[j]); if (access) { cudaSetDevice(g[i]); cudaDeviceEnablePeerAccess(g[j], 0); cudaDeviceSynchronize(); cudaSetDevice(g[j]); cudaDeviceEnablePeerAccess(g[i], 0); cudaDeviceSynchronize(); } } } } } void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop, cudaStream_t stream[]) { float time_taken[g.size()*g.size()], bw[g.size()*g.size()]; printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n"); configure(size, buffer_s, buffer_d, start, stop, stream); for (int i=0; i<g.size(); i++) { for (int j=0; j<g.size(); j++) { if (i!=j) { printf("Copying from %d to %d\n", g[i], g[j]); cudaEventRecord(start[i]); cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size); cudaEventRecord(stop[i]); cudaEventSynchronize(stop[i]); cudaDeviceSynchronize(); float time_ms; cudaEventElapsedTime(&time_ms,start[i],stop[i]); time_taken[i*g.size()+j] = time_ms*1e3; bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30); } } } printf("Time(ms) spent in memcpy\n"); printf(" D\\D"); for (int j=0; j<g.size(); j++) printf("%10d ", g[j]); printf("\n"); for (int i=0; i<g.size(); i++) { printf("%6d", g[i]); for (int j=0; j<g.size(); j++) { if (i==j) printf("%12.2f", 0.0); else printf("%12.2f", time_taken[i*g.size()+j]); } printf("\n"); } printf("bandwidth(Gbps) utilized during memcpy\n"); printf(" D\\D"); for (int j=0; j<g.size(); j++) printf("%10d ", g[j]); printf("\n"); for (int i=0; i<g.size(); i++) { printf("%6d", g[i]); for (int j=0; j<g.size(); j++) if (i==j) printf("%12.2f", 0.0); else printf("%12.2f", bw[i*g.size()+j]); printf("\n"); } } void cyclic_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop, cudaStream_t stream[]) { float bw[g.size()], time_taken[g.size()]; printf("\nCyclic Memory Transfers: 0->1->2->3...n->0\n"); configure(size, buffer_s, buffer_d, start, stop, stream); for (int i=0; i<g.size(); i++) { cudaEventRecord(start[i], stream[i]); cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[(i+1)%g.size()], g[(i+1)%g.size()], size, stream[i]); cudaEventRecord(stop[i], stream[i]); } for (int i=0; i<g.size(); i++) { cudaEventSynchronize(stop[i]); float time_ms; cudaEventElapsedTime(&time_ms,start[i],stop[i]); time_taken[i] = time_ms*1e3; bw[i] = (float)size*1000/time_ms/(1<<30); } printf("\nTime spent in memcpy\n"); for (int i=0; i<g.size(); i++) printf("GPU%d -> GPU%d: %3.5f\n", g[i], g[(i+1)%g.size()], time_taken[i]); printf("\nBandwidth(Gbps) utilized in memcpy\n"); for (int i=0; i<g.size(); i++) printf("GPU%d -> GPU%d: %3.5f\n", g[i], g[(i+1)%g.size()], bw[i]); } void burst_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d, vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop, cudaStream_t stream[]) { float bw[g.size()], time_taken[g.size()]; 
printf("\nBurst copy: Every GPU is memcpy-ing to every other GPU\n"); printf("%4d%8d%12s\n%4s%8s%12s\n%4s%8s%12s\n", 1, 2,"n","^","^", "^","|","|","|"); printf("3<-0->2 4<-1->3 ... %s<-%s->%s\n","q", "m", "p"); printf("%4s%8s%12s\n%4s%8s%12s\n%4d%8d%12s\n\n", "|","|","|","v","v", "v",4,0,"r"); configure(size, buffer_s, buffer_d, start, stop, stream); for (int i=0; i<g.size(); i++) { cudaEventRecord(start[i]); for (int j=0; j<g.size(); j++) if (i!=j) cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size, stream[i*g.size()+j]); cudaEventRecord(stop[i]); } for (int i=0; i<g.size(); i++) { cudaEventSynchronize(stop[i]); float time_ms; cudaEventElapsedTime(&time_ms,start[i],stop[i]); time_taken[i] = time_ms*1e3; bw[i] = (float)size*1000/time_ms/(1<<30); } printf("\t\tTime(ms)\tBandwidth(Gbps)\n"); for (int i=0; i<g.size(); i++) printf("GPU%d\t\t%6.2f\t\t%6.2f\n",g[i], time_taken[i], bw[i]); } void perf_analyze(size_t size) { vector<int*> buffer_s(g.size()); vector<int*> buffer_d(g.size()); vector<cudaEvent_t> start(g.size()); vector<cudaEvent_t> stop(g.size()); cudaStream_t stream[g.size() * g.size()]; configure(size, buffer_s, buffer_d, start, stop, stream); // Blocked blocked_copy(size, buffer_s, buffer_d, start, stop, stream); // Cyclic cyclic_copy(size, buffer_s, buffer_d, start, stop, stream); // Burst burst_copy(size, buffer_s, buffer_d, start, stop, stream); } int main(int argc, char** argv) { // NVLink D<->D performance size_t size = (1<<30); if (!g.size()) { int n; printf("Using all 8 GPUs\n"); cudaGetDeviceCount(&n); for (int i=0; i<n; i++) g.push_back(i); } //define size perf_analyze(size); return 0; }
474244f285fcb94c3017d7773ffc465e4355b52b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "chTimer.h"

int main(int argc, char *argv[])
{
    int *dmem, *hpage, *hpin;
    int size = 1024; // 1kB
    chTimerTimestamp start, stop;
    double microseconds;

    for (int i=0; i<21; i++) {
        hipMalloc((void**)&dmem, size);     // memory on device
        hpage = (int*) malloc(size);        // pageable memory on host
        hipHostMalloc((void**)&hpin, size); // pinned memory on host

        chTimerGetTime( &start );
        hipMemcpy( dmem, hpage, size, hipMemcpyHostToDevice );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; H2D; pageable; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        chTimerGetTime( &start );
        hipMemcpy( dmem, hpin, size, hipMemcpyHostToDevice );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; H2D; pinned; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        chTimerGetTime( &start );
        hipMemcpy( hpage, dmem, size, hipMemcpyDeviceToHost );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; D2H; pageable; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        chTimerGetTime( &start );
        hipMemcpy( hpin, dmem, size, hipMemcpyDeviceToHost );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; D2H; pinned; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        hipFree(dmem);
        free(hpage);
        hipHostFree(hpin);

        size = size*2; // double the size
    }
}
474244f285fcb94c3017d7773ffc465e4355b52b.cu
#include <stdio.h>
#include "chTimer.h"

int main(int argc, char *argv[])
{
    int *dmem, *hpage, *hpin;
    int size = 1024; // 1kB
    chTimerTimestamp start, stop;
    double microseconds;

    for (int i=0; i<21; i++) {
        cudaMalloc((void**)&dmem, size);      // memory on device
        hpage = (int*) malloc(size);          // pageable memory on host
        cudaMallocHost((void**)&hpin, size);  // pinned memory on host

        chTimerGetTime( &start );
        cudaMemcpy( dmem, hpage, size, cudaMemcpyHostToDevice );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; H2D; pageable; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        chTimerGetTime( &start );
        cudaMemcpy( dmem, hpin, size, cudaMemcpyHostToDevice );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; H2D; pinned; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        chTimerGetTime( &start );
        cudaMemcpy( hpage, dmem, size, cudaMemcpyDeviceToHost );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; D2H; pageable; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        chTimerGetTime( &start );
        cudaMemcpy( hpin, dmem, size, cudaMemcpyDeviceToHost );
        chTimerGetTime( &stop );
        microseconds = 1e6*chTimerElapsedTime( &start, &stop );
        printf("%d kB; D2H; pinned; %.2f us\n", size/1024, microseconds);
        fflush(stdout);

        cudaFree(dmem);
        free(hpage);
        cudaFreeHost(hpin);

        size = size*2; // double the size
    }
}
08026a8e616a4b49098fc3ff97e9875de952815d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include "../monodomain/constants.h" #include "model_gpu_utils.h" #include "ten_tusscher_3_RS.h" #define EPI extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 3 GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } // Default values for a healthy cell /////////// real atpi = 6.8f; real Ko = 5.4f; real Ki = 138.3f; real Vm_change = 0.0; real GNa_multiplicator = 1.0f; real GCa_multiplicator = 1.0f; //////////////////////////////////// real *fibrosis_device; real *fibs = NULL; int num_extra_parameters = 6; size_t extra_parameters_size = num_extra_parameters*sizeof(real); real *extra_parameters_device; real fibs_size = num_cells_to_solve*sizeof(real); bool dealocate = false; if(extra_data) { fibs = ((real*)extra_data) + num_extra_parameters; //pointer } else { extra_data = malloc(extra_parameters_size); ((real*)extra_data)[0] = atpi; ((real*)extra_data)[1] = Ko; ((real*)extra_data)[2] = Ki; ((real*)extra_data)[3] = Vm_change; ((real*)extra_data)[4] = GNa_multiplicator; ((real*)extra_data)[5] = GCa_multiplicator; fibs = (real*)calloc(num_cells_to_solve, sizeof(real)); dealocate = true; } check_cuda_error(hipMalloc((void **) &extra_parameters_device, extra_parameters_size)); check_cuda_error(hipMemcpy(extra_parameters_device, extra_data, extra_parameters_size, hipMemcpyHostToDevice)); check_cuda_error(hipMalloc((void **) &fibrosis_device, fibs_size)); check_cuda_error(hipMemcpy(fibrosis_device, fibs, fibs_size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( solve_gpu), dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps, fibrosis_device, extra_parameters_device); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); check_cuda_error(hipFree(fibrosis_device)); check_cuda_error(hipFree(extra_parameters_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); if(dealocate) { free(fibs); free(extra_data); } } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { *((real *) ((char *) sv 
+ pitch * 0) + threadID) = -86.2f; // V; millivolt *((real *) ((char *) sv + pitch * 1) + threadID) = 0.0f; //M *((real *) ((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real *) ((char *) sv + pitch * 3) + threadID) = 0.75; //J *((real *) ((char *) sv + pitch * 4) + threadID) = 0.0f; //Xr1 *((real *) ((char *) sv + pitch * 5) + threadID) = 0.0f; //Xs *((real *) ((char *) sv + pitch * 6) + threadID) = 1.0; //S *((real *) ((char *) sv + pitch * 7) + threadID) = 1.0; //F *((real *) ((char *) sv + pitch * 8) + threadID) = 1.0; //F2 *((real *) ((char *) sv + pitch * 9) + threadID) = 0.0; //D_INF *((real *) ((char *) sv + pitch * 10) + threadID) = 0.0; //R_INF *((real *) ((char *) sv + pitch * 11) + threadID) = 0.0; //Xr2_INF } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps, real *fibrosis, real *extra_parameters) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt, fibrosis[threadID], extra_parameters); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 1; i < 12; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_, real dt, real fibrosis, real *extra_parameters) { //fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic. Anything between 0 and 1 means border zone const real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); //printf("%lf, %lf, %lf, %lf, %lf\n", atpi, Ko, Ki_multiplicator, acidosis, fibrosis); real atpi = extra_parameters[0]; real Ko = extra_parameters[1]; real Ki = extra_parameters[2]; real Vm_modifier = extra_parameters[3]; real GNa_multiplicator = extra_parameters[4]; real GCa_multiplicator = extra_parameters[5]; Vm_modifier = Vm_modifier - Vm_modifier*fibrosis; //These values are from In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration real svolt_acid = svolt - Vm_modifier; const real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); const real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); const real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); const real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); const real sxs = *((real*)((char*)sv_ + pitch * 5) + threadID_); const real ss = *((real*)((char*)sv_ + pitch * 6) + threadID_); const real sf = *((real*)((char*)sv_ + pitch * 7) + threadID_); const real sf2 = *((real*)((char*)sv_ + pitch * 8) + threadID_); const real D_INF = *((real*)((char*)sv_ + pitch * 9) + threadID_); const real Xr2_INF = *((real*)((char*)sv_ + pitch * 10) + threadID_); const real R_INF = *((real*)((char*)sv_ + pitch * 11) + threadID_); const real natp = 0.24; // K dependence of ATP-sensitive K current const real nicholsarea = 0.00005; // Nichol's areas (cm^2) const real hatp = 2; // Hill coefficient //Extracellular potassium concentration was elevated //from its default value of 5.4 mM to values between 6.0 and 8.0 mM //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry real Ko_change = 5.4f - Ko; Ko = Ko + Ko_change*fibrosis; 
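//Note: fibrosis acts as a blend factor throughout this block -- at fibrosis = 1 (healthy cell)
//the supplied ischemic parameters (Ko, atpi, Ki and the GNa/GCa multipliers) are pulled back to
//their healthy defaults, while at fibrosis = 0 (fully fibrotic/ischemic) they are used unchanged.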
//Linear changing of atpi depending on the fibrosis and distance from the center of the scar (only for border zone cells) real atpi_change = 6.8f - atpi; atpi = atpi + atpi_change*fibrosis; //real katp = 0.306; //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry const real katp = -0.0942857142857f*atpi + 0.683142857143f; const real patp = 1.0f/(1.0f + powf((atpi/katp),hatp)); const real gkatp = 0.000195f/nicholsarea; const real gkbaratp = gkatp*patp*powf((Ko/4),natp); const real katp2= 1.4; const real hatp2 = 2.6; const real pcal = 1.0f/(1.0f + powf((katp2/atpi),hatp2)); const real Cao=2.0; const real Nao=140.0; const real Cai=0.00007; const real Nai=7.67; //This paramter changes with acidosis. //In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration //the authors change Ki by multiplying it to 0.863259669. Should we do the same here? //This changes are based on data from rat and guinea pig real Ki_change = 138.3f - Ki; Ki = Ki + Ki_change*fibrosis; real GNa_multiplicator_change = 1.0f - GNa_multiplicator; GNa_multiplicator = GNa_multiplicator + GNa_multiplicator_change*fibrosis; real GCa_multiplicator_change = 1.0f - GCa_multiplicator; GCa_multiplicator = GCa_multiplicator + GCa_multiplicator_change*fibrosis; //printf("Ki = %lf\n", Ki); //Constants const real R = 8314.472; const real F = 96485.3415f; const real T = 310.0; const real RTONF = (R*T)/F; //Parameters for currents //Parameters for IKr const real Gkr=0.101; //Parameters for Iks const real pKNa=0.03; #ifdef EPI const real Gks=0.257; #endif #ifdef ENDO const real Gks=0.392; #endif #ifdef MCELL const real Gks=0.098; #endif //Parameters for Ik1 const real GK1=5.405; //Parameters for Ito #ifdef EPI const real Gto=0.294; #endif #ifdef ENDO const real Gto=0.073; #endif #ifdef MCELL const real Gto=0.294; #endif //Parameters for INa //if acidosis this has to change to 0.75*GNa real GNa=14.838; GNa = GNa*GNa_multiplicator; //Parameters for IbNa const real GbNa=0.00029; //Parameters for INaK const real KmK=1.0; const real KmNa=40.0; const real knak=2.724; //Parameters for ICaL //if acidosis this has to change to 0.75*GCaL real GCaL=0.2786f*pcal; GCaL = GCaL*GCa_multiplicator; //Parameters for IbCa const real GbCa=0.000592; //Parameters for INaCa const real knaca=1000; const real KmNai=87.5; const real KmCa=1.38; const real ksat=0.1; const real n=0.35; //Parameters for IpCa const real GpCa=0.1238; const real KpCa=0.0005; //Parameters for IpK; const real GpK=0.0293; const real Ek=RTONF*(logf((Ko/Ki))); const real Ena=RTONF*(logf((Nao/Nai))); const real Eks=RTONF*(logf((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); const real Eca=0.5f*RTONF*(logf((Cao/Cai))); real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real IKatp; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real Xr1_INF; real Xr2_INF_new; real TAU_Xr1; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF_new; real S_INF; real TAU_S; real Af; real Bf; real Cf; real Af2; real Bf2; real Cf2; real D_INF_new; real TAU_F; real F_INF; real TAU_F2; real F2_INF; real sItot; //Needed to compute currents Ak1=0.1f/(1.0f+expf(0.06f*(svolt-Ek-200.0f))); 
Bk1=(3.0f*expf(0.0002f*(svolt-Ek+100.0f))+ expf(0.1f*(svolt-Ek-10.0f)))/(1.0f+expf(-0.5f*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1.0f/(1.0f+0.1245f*expf(-0.1f*svolt_acid*F/(R*T))+0.0353f*expf(-svolt_acid*F/(R*T)))); rec_ipK=1.0f/(1.0f+expf((25.0f-svolt)/5.98f)); //According to In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration //Vm_acid = Vm -3.4 for all sodium current computation //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt_acid-Ena); ICaL=GCaL*D_INF*sf*sf2*(svolt-60); Ito=Gto*R_INF*ss*(svolt-Ek); IKr=Gkr*sqrtf(Ko/5.4f)*sxr1*Xr2_INF*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1.0f/(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1.0f/(KmCa+Cao))* (1.0f/(1.0f+ksat*expf((n-1.0f)*svolt_acid*F/(R*T))))* (expf(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- expf((n-1.0f)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5f); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt_acid-Ena); IbCa=GbCa*(svolt-Eca); IKatp = gkbaratp*(svolt-Ek); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + IKatp + stim_current; //compute steady state values and time constants AM=1.0f/(1.0f+expf((-60.0f-svolt)/5.0f)); BM=0.1f/(1.0f+expf((svolt+35.0f)/5.0f))+0.10f/(1.0f+expf((svolt-50.0f)/200.0f)); TAU_M=AM*BM; M_INF=1.0f/((1.0f+expf((-56.86f-svolt)/9.03f))*(1.0f+expf((-56.86f-svolt)/9.03f))); if (svolt>=-40.) { AH_1=0.0f; BH_1=(0.77f/(0.13f*(1.0f+expf(-(svolt+10.66f)/11.1f)))); TAU_H= 1.0f/(AH_1+BH_1); } else { AH_2=(0.057f*expf(-(svolt+80.0f)/6.8f)); BH_2=(2.7f*expf(0.079f*svolt)+(3.1e5f)*expf(0.3485f*svolt)); TAU_H=1.0f/(AH_2+BH_2); } H_INF=1.0f/((1.0f+expf((svolt+71.55f)/7.43f))*(1.0f+expf((svolt+71.55f)/7.43f))); if(svolt>=-40.0f) { AJ_1=0.0f; BJ_1=(0.6f*expf((0.057f)*svolt)/(1.0f+expf(-0.1f*(svolt+32.0f)))); TAU_J= 1.0f/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4f)*expf(0.2444f*svolt)-(6.948e-6f)*expf(-0.04391f*svolt))*(svolt+37.78f)/ (1.0f+expf(0.311f*(svolt+79.23f)))); BJ_2=(0.02424f*expf(-0.01052f*svolt)/(1.0f+expf(-0.1378f*(svolt+40.14f)))); TAU_J= 1.0f/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1.0f/(1.0f+expf((-26.0f-svolt)/7.0f)); axr1=450.0f/(1.0f+expf((-45.0f-svolt)/10.0f)); bxr1=6.0f/(1.0f+expf((svolt-(-30.0f))/11.5f)); TAU_Xr1=axr1*bxr1; Xr2_INF_new=1.0f/(1.0f+expf((svolt-(-88.0f))/24.0f)); Xs_INF=1.0f/(1.0f+expf((-5.0f-svolt)/14.0f)); Axs=(1400.0f/(sqrtf(1.0f+expf((5.0f-svolt)/6.0f)))); Bxs=(1.0f/(1.0f+expf((svolt-35.0f)/15.0f))); TAU_Xs=Axs*Bxs+80; #ifdef EPI R_INF_new=1./(1.+expf((20-svolt)/6.)); S_INF=1./(1.+expf((svolt+20)/5.)); TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF_new=1.0f/(1.0f+expf((20.0f-svolt)/6.0f)); S_INF=1.0f/(1.0f+expf((svolt+28.0f)/5.0f)); TAU_S=1000.0f*expf(-(svolt+67.0f)*(svolt+67.0f)/1000.0f)+8.0f; #endif #ifdef MCELL R_INF_new=1./(1.+expf((20-svolt)/6.)); S_INF=1./(1.+expf((svolt+20)/5.)); TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.; #endif D_INF_new=1.0f/(1.0f+expf((-8.0f-svolt)/7.5f)); F_INF=1.0f/(1.0f+expf((svolt+20)/7)); Af=1102.5f*expf(-(svolt+27)*(svolt+27.0f)/225.0f); Bf=200.0f/(1.0f+expf((13.0f-svolt)/10.f)); Cf=(180.0f/(1.0f+expf((svolt+30.0f)/10.0f)))+20.0f; TAU_F=Af+Bf+Cf; F2_INF=0.67f/(1.0f+expf((svolt+35.0f)/7.0f))+0.33f; Af2=600.0f*expf(-(svolt+27.0f)*(svolt+27.0f)/170.0f); Bf2=7.75f/(1.0f+expf((25.0f-svolt)/10.0f)); 
Cf2=16.0f/(1.0f+expf((svolt+30.0f)/10.0f)); TAU_F2=Af2+Bf2+Cf2; //update voltage rDY_[0] = -sItot; //Update gates rDY_[1] = M_INF-(M_INF-sm)*expf(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*expf(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*expf(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*expf(-dt/TAU_Xr1); rDY_[5] = Xs_INF-(Xs_INF-sxs)*expf(-dt/TAU_Xs); rDY_[6]= S_INF-(S_INF-ss)*expf(-dt/TAU_S); rDY_[7] =F_INF-(F_INF-sf)*expf(-dt/TAU_F); rDY_[8] =F2_INF-(F2_INF-sf2)*expf(-dt/TAU_F2); rDY_[9] = D_INF_new; rDY_[10] = R_INF_new; rDY_[11] = Xr2_INF_new; }
08026a8e616a4b49098fc3ff97e9875de952815d.cu
#include <stddef.h> #include "../monodomain/constants.h" #include "model_gpu_utils.h" #include "ten_tusscher_3_RS.h" #define EPI extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 3 GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } // Default values for a healthy cell /////////// real atpi = 6.8f; real Ko = 5.4f; real Ki = 138.3f; real Vm_change = 0.0; real GNa_multiplicator = 1.0f; real GCa_multiplicator = 1.0f; //////////////////////////////////// real *fibrosis_device; real *fibs = NULL; int num_extra_parameters = 6; size_t extra_parameters_size = num_extra_parameters*sizeof(real); real *extra_parameters_device; real fibs_size = num_cells_to_solve*sizeof(real); bool dealocate = false; if(extra_data) { fibs = ((real*)extra_data) + num_extra_parameters; //pointer } else { extra_data = malloc(extra_parameters_size); ((real*)extra_data)[0] = atpi; ((real*)extra_data)[1] = Ko; ((real*)extra_data)[2] = Ki; ((real*)extra_data)[3] = Vm_change; ((real*)extra_data)[4] = GNa_multiplicator; ((real*)extra_data)[5] = GCa_multiplicator; fibs = (real*)calloc(num_cells_to_solve, sizeof(real)); dealocate = true; } check_cuda_error(cudaMalloc((void **) &extra_parameters_device, extra_parameters_size)); check_cuda_error(cudaMemcpy(extra_parameters_device, extra_data, extra_parameters_size, cudaMemcpyHostToDevice)); check_cuda_error(cudaMalloc((void **) &fibrosis_device, fibs_size)); check_cuda_error(cudaMemcpy(fibrosis_device, fibs, fibs_size, cudaMemcpyHostToDevice)); solve_gpu<<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps, fibrosis_device, extra_parameters_device); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); check_cuda_error(cudaFree(fibrosis_device)); check_cuda_error(cudaFree(extra_parameters_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); if(dealocate) { free(fibs); free(extra_data); } } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { *((real *) ((char *) sv + pitch * 0) + threadID) = -86.2f; // V; millivolt *((real *) ((char *) sv + pitch * 1) + threadID) = 0.0f; //M *((real *) ((char *) sv + 
pitch * 2) + threadID) = 0.75; //H *((real *) ((char *) sv + pitch * 3) + threadID) = 0.75; //J *((real *) ((char *) sv + pitch * 4) + threadID) = 0.0f; //Xr1 *((real *) ((char *) sv + pitch * 5) + threadID) = 0.0f; //Xs *((real *) ((char *) sv + pitch * 6) + threadID) = 1.0; //S *((real *) ((char *) sv + pitch * 7) + threadID) = 1.0; //F *((real *) ((char *) sv + pitch * 8) + threadID) = 1.0; //F2 *((real *) ((char *) sv + pitch * 9) + threadID) = 0.0; //D_INF *((real *) ((char *) sv + pitch * 10) + threadID) = 0.0; //R_INF *((real *) ((char *) sv + pitch * 11) + threadID) = 0.0; //Xr2_INF } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps, real *fibrosis, real *extra_parameters) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt, fibrosis[threadID], extra_parameters); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 1; i < 12; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_, real dt, real fibrosis, real *extra_parameters) { //fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic. Anything between 0 and 1 means border zone const real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); //printf("%lf, %lf, %lf, %lf, %lf\n", atpi, Ko, Ki_multiplicator, acidosis, fibrosis); real atpi = extra_parameters[0]; real Ko = extra_parameters[1]; real Ki = extra_parameters[2]; real Vm_modifier = extra_parameters[3]; real GNa_multiplicator = extra_parameters[4]; real GCa_multiplicator = extra_parameters[5]; Vm_modifier = Vm_modifier - Vm_modifier*fibrosis; //These values are from In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration real svolt_acid = svolt - Vm_modifier; const real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); const real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); const real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); const real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); const real sxs = *((real*)((char*)sv_ + pitch * 5) + threadID_); const real ss = *((real*)((char*)sv_ + pitch * 6) + threadID_); const real sf = *((real*)((char*)sv_ + pitch * 7) + threadID_); const real sf2 = *((real*)((char*)sv_ + pitch * 8) + threadID_); const real D_INF = *((real*)((char*)sv_ + pitch * 9) + threadID_); const real Xr2_INF = *((real*)((char*)sv_ + pitch * 10) + threadID_); const real R_INF = *((real*)((char*)sv_ + pitch * 11) + threadID_); const real natp = 0.24; // K dependence of ATP-sensitive K current const real nicholsarea = 0.00005; // Nichol's areas (cm^2) const real hatp = 2; // Hill coefficient //Extracellular potassium concentration was elevated //from its default value of 5.4 mM to values between 6.0 and 8.0 mM //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry real Ko_change = 5.4f - Ko; Ko = Ko + Ko_change*fibrosis; //Linear changing of atpi depending on the fibrosis and distance from the center of the scar (only for border zone cells) real atpi_change = 
6.8f - atpi; atpi = atpi + atpi_change*fibrosis; //real katp = 0.306; //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry const real katp = -0.0942857142857f*atpi + 0.683142857143f; const real patp = 1.0f/(1.0f + powf((atpi/katp),hatp)); const real gkatp = 0.000195f/nicholsarea; const real gkbaratp = gkatp*patp*powf((Ko/4),natp); const real katp2= 1.4; const real hatp2 = 2.6; const real pcal = 1.0f/(1.0f + powf((katp2/atpi),hatp2)); const real Cao=2.0; const real Nao=140.0; const real Cai=0.00007; const real Nai=7.67; //This paramter changes with acidosis. //In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration //the authors change Ki by multiplying it to 0.863259669. Should we do the same here? //This changes are based on data from rat and guinea pig real Ki_change = 138.3f - Ki; Ki = Ki + Ki_change*fibrosis; real GNa_multiplicator_change = 1.0f - GNa_multiplicator; GNa_multiplicator = GNa_multiplicator + GNa_multiplicator_change*fibrosis; real GCa_multiplicator_change = 1.0f - GCa_multiplicator; GCa_multiplicator = GCa_multiplicator + GCa_multiplicator_change*fibrosis; //printf("Ki = %lf\n", Ki); //Constants const real R = 8314.472; const real F = 96485.3415f; const real T = 310.0; const real RTONF = (R*T)/F; //Parameters for currents //Parameters for IKr const real Gkr=0.101; //Parameters for Iks const real pKNa=0.03; #ifdef EPI const real Gks=0.257; #endif #ifdef ENDO const real Gks=0.392; #endif #ifdef MCELL const real Gks=0.098; #endif //Parameters for Ik1 const real GK1=5.405; //Parameters for Ito #ifdef EPI const real Gto=0.294; #endif #ifdef ENDO const real Gto=0.073; #endif #ifdef MCELL const real Gto=0.294; #endif //Parameters for INa //if acidosis this has to change to 0.75*GNa real GNa=14.838; GNa = GNa*GNa_multiplicator; //Parameters for IbNa const real GbNa=0.00029; //Parameters for INaK const real KmK=1.0; const real KmNa=40.0; const real knak=2.724; //Parameters for ICaL //if acidosis this has to change to 0.75*GCaL real GCaL=0.2786f*pcal; GCaL = GCaL*GCa_multiplicator; //Parameters for IbCa const real GbCa=0.000592; //Parameters for INaCa const real knaca=1000; const real KmNai=87.5; const real KmCa=1.38; const real ksat=0.1; const real n=0.35; //Parameters for IpCa const real GpCa=0.1238; const real KpCa=0.0005; //Parameters for IpK; const real GpK=0.0293; const real Ek=RTONF*(logf((Ko/Ki))); const real Ena=RTONF*(logf((Nao/Nai))); const real Eks=RTONF*(logf((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); const real Eca=0.5f*RTONF*(logf((Cao/Cai))); real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real IKatp; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real Xr1_INF; real Xr2_INF_new; real TAU_Xr1; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF_new; real S_INF; real TAU_S; real Af; real Bf; real Cf; real Af2; real Bf2; real Cf2; real D_INF_new; real TAU_F; real F_INF; real TAU_F2; real F2_INF; real sItot; //Needed to compute currents Ak1=0.1f/(1.0f+expf(0.06f*(svolt-Ek-200.0f))); Bk1=(3.0f*expf(0.0002f*(svolt-Ek+100.0f))+ expf(0.1f*(svolt-Ek-10.0f)))/(1.0f+expf(-0.5f*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); 
rec_iNaK=(1.0f/(1.0f+0.1245f*expf(-0.1f*svolt_acid*F/(R*T))+0.0353f*expf(-svolt_acid*F/(R*T)))); rec_ipK=1.0f/(1.0f+expf((25.0f-svolt)/5.98f)); //According to In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration //Vm_acid = Vm -3.4 for all sodium current computation //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt_acid-Ena); ICaL=GCaL*D_INF*sf*sf2*(svolt-60); Ito=Gto*R_INF*ss*(svolt-Ek); IKr=Gkr*sqrtf(Ko/5.4f)*sxr1*Xr2_INF*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1.0f/(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1.0f/(KmCa+Cao))* (1.0f/(1.0f+ksat*expf((n-1.0f)*svolt_acid*F/(R*T))))* (expf(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- expf((n-1.0f)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5f); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt_acid-Ena); IbCa=GbCa*(svolt-Eca); IKatp = gkbaratp*(svolt-Ek); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + IKatp + stim_current; //compute steady state values and time constants AM=1.0f/(1.0f+expf((-60.0f-svolt)/5.0f)); BM=0.1f/(1.0f+expf((svolt+35.0f)/5.0f))+0.10f/(1.0f+expf((svolt-50.0f)/200.0f)); TAU_M=AM*BM; M_INF=1.0f/((1.0f+expf((-56.86f-svolt)/9.03f))*(1.0f+expf((-56.86f-svolt)/9.03f))); if (svolt>=-40.) { AH_1=0.0f; BH_1=(0.77f/(0.13f*(1.0f+expf(-(svolt+10.66f)/11.1f)))); TAU_H= 1.0f/(AH_1+BH_1); } else { AH_2=(0.057f*expf(-(svolt+80.0f)/6.8f)); BH_2=(2.7f*expf(0.079f*svolt)+(3.1e5f)*expf(0.3485f*svolt)); TAU_H=1.0f/(AH_2+BH_2); } H_INF=1.0f/((1.0f+expf((svolt+71.55f)/7.43f))*(1.0f+expf((svolt+71.55f)/7.43f))); if(svolt>=-40.0f) { AJ_1=0.0f; BJ_1=(0.6f*expf((0.057f)*svolt)/(1.0f+expf(-0.1f*(svolt+32.0f)))); TAU_J= 1.0f/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4f)*expf(0.2444f*svolt)-(6.948e-6f)*expf(-0.04391f*svolt))*(svolt+37.78f)/ (1.0f+expf(0.311f*(svolt+79.23f)))); BJ_2=(0.02424f*expf(-0.01052f*svolt)/(1.0f+expf(-0.1378f*(svolt+40.14f)))); TAU_J= 1.0f/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1.0f/(1.0f+expf((-26.0f-svolt)/7.0f)); axr1=450.0f/(1.0f+expf((-45.0f-svolt)/10.0f)); bxr1=6.0f/(1.0f+expf((svolt-(-30.0f))/11.5f)); TAU_Xr1=axr1*bxr1; Xr2_INF_new=1.0f/(1.0f+expf((svolt-(-88.0f))/24.0f)); Xs_INF=1.0f/(1.0f+expf((-5.0f-svolt)/14.0f)); Axs=(1400.0f/(sqrtf(1.0f+expf((5.0f-svolt)/6.0f)))); Bxs=(1.0f/(1.0f+expf((svolt-35.0f)/15.0f))); TAU_Xs=Axs*Bxs+80; #ifdef EPI R_INF_new=1./(1.+expf((20-svolt)/6.)); S_INF=1./(1.+expf((svolt+20)/5.)); TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF_new=1.0f/(1.0f+expf((20.0f-svolt)/6.0f)); S_INF=1.0f/(1.0f+expf((svolt+28.0f)/5.0f)); TAU_S=1000.0f*expf(-(svolt+67.0f)*(svolt+67.0f)/1000.0f)+8.0f; #endif #ifdef MCELL R_INF_new=1./(1.+expf((20-svolt)/6.)); S_INF=1./(1.+expf((svolt+20)/5.)); TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.; #endif D_INF_new=1.0f/(1.0f+expf((-8.0f-svolt)/7.5f)); F_INF=1.0f/(1.0f+expf((svolt+20)/7)); Af=1102.5f*expf(-(svolt+27)*(svolt+27.0f)/225.0f); Bf=200.0f/(1.0f+expf((13.0f-svolt)/10.f)); Cf=(180.0f/(1.0f+expf((svolt+30.0f)/10.0f)))+20.0f; TAU_F=Af+Bf+Cf; F2_INF=0.67f/(1.0f+expf((svolt+35.0f)/7.0f))+0.33f; Af2=600.0f*expf(-(svolt+27.0f)*(svolt+27.0f)/170.0f); Bf2=7.75f/(1.0f+expf((25.0f-svolt)/10.0f)); Cf2=16.0f/(1.0f+expf((svolt+30.0f)/10.0f)); TAU_F2=Af2+Bf2+Cf2; //update voltage rDY_[0] = -sItot; //Update gates rDY_[1] = M_INF-(M_INF-sm)*expf(-dt/TAU_M); 
rDY_[2] = H_INF-(H_INF-sh)*expf(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*expf(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*expf(-dt/TAU_Xr1); rDY_[5] = Xs_INF-(Xs_INF-sxs)*expf(-dt/TAU_Xs); rDY_[6]= S_INF-(S_INF-ss)*expf(-dt/TAU_S); rDY_[7] =F_INF-(F_INF-sf)*expf(-dt/TAU_F); rDY_[8] =F2_INF-(F2_INF-sf2)*expf(-dt/TAU_F2); rDY_[9] = D_INF_new; rDY_[10] = R_INF_new; rDY_[11] = Xr2_INF_new; }
dd2b5bc295efd018b41ef0c6dd537ddeb6c5cc77.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif

#ifndef SEGMENT_SIZE
#define SEGMENT_SIZE 2
#endif

void cudaTest(hipError_t error) {
    if (error != hipSuccess) {
        printf("cuda returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
        exit (EXIT_FAILURE);
    }
}

void print(uint* host_data, uint n, uint m) {
    std::cout << "\n";
    for (uint i = 0; i < n; i++) {
        for (uint j = 0; j < m; j++) {
            std::cout << host_data[i * n + j] << "\t";
        }
        std::cout << "\n";
    }
}

//__global__ void bitonic_sort_step(uint *dev_values, int k, int p, int n) {
__global__ void block_sorting(uint *d_vec, int n) {
    uint idx = blockDim.x * blockIdx.x + threadIdx.x;
    uint idy = blockDim.y * blockIdx.y + threadIdx.y;

    __shared__ uint As[BLOCK_SIZE][BLOCK_SIZE];
    As[threadIdx.x][threadIdx.y] = d_vec[idx * n + idy];

    uint i = threadIdx.x;
    uint j = threadIdx.y;
    __syncthreads();

    for (int k = 2; k <= SEGMENT_SIZE; k <<= 1) { // sorting only segment size row
        for (int p = k >> 1; p > 0; p = p >> 1) {
            uint ixp = i ^ p;
            /* The threads with the lowest ids sort the array. */
            if (i < ixp) {
                //bool up = ((i & k) == 0); // sorting the entire matrix row
                bool up = ((i & k) == 0); // sorting only block size matrix row

                // Sort ascending or descending according to up value
                if ((As[j][i] > As[j][ixp]) == up) {
                    // exchange(i,ixj);
                    uint temp = As[j][i];
                    As[j][i] = As[j][ixp];
                    As[j][ixp] = temp;
                }
            }
            __syncthreads();
        }
    }

    d_vec[idx * n + idy] = As[threadIdx.x][threadIdx.y];
}

int main(int argc, char** argv) {
    uint num_of_elements;
    scanf("%d", &num_of_elements);
    int n = num_of_elements;
    int m = num_of_elements;
    uint mem_size = sizeof(int) * (n * m);
    uint *h_vec = (uint *) malloc(mem_size);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            scanf("%d", &h_vec[i * n + j]);
        }
    }

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    uint *d_vec;
    cudaTest(hipMalloc((void **) &d_vec, mem_size));

    for (int i = 0; i < EXECUTIONS; i++) {
        cudaTest(hipMemcpy(d_vec, h_vec, mem_size, hipMemcpyHostToDevice));

        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid((n - 1) / dimBlock.x + 1, (m - 1) / dimBlock.y + 1);

        hipEventRecord(start);
        hipLaunchKernelGGL(( block_sorting), dim3(dimGrid), dim3(dimBlock), 0, 0, d_vec, n);
        hipEventRecord(stop);

        hipError_t errSync = hipGetLastError();
        hipError_t errAsync = hipDeviceSynchronize();
        if (errSync != hipSuccess)
            printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
        if (errAsync != hipSuccess)
            printf("Async kernel error: %s\n", hipGetErrorString(errAsync));

        if (ELAPSED_TIME == 1) {
            hipEventSynchronize(stop);
            float milliseconds = 0;
            hipEventElapsedTime(&milliseconds, start, stop);
            std::cout << milliseconds << "\n";
        }

        hipDeviceSynchronize();
    }

    hipMemcpy(h_vec, d_vec, mem_size, hipMemcpyDeviceToHost);
    hipFree(d_vec);

    if (ELAPSED_TIME != 1) {
        print(h_vec, n, m);
    }

    free(h_vec);
    return 0;
}

/*
 *
for (int p = 0; p < logn; p++) {
    for (int q = 0; q <= p; q++) {
        int d = 1 << (p-q);
        //for(int i = 0; i < n; i++) {
        bool up = ((col >> p) & 2) == 0;
        if ((col & d) == 0 && (As[row][col] > As[row][col | d]) == up) {
            int t = As[row][col];
            As[row][col] = As[row][col | d];
            As[row][col | d] = t;
        }
        // }
    }
}
*/
dd2b5bc295efd018b41ef0c6dd537ddeb6c5cc77.cu
/**
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif

#ifndef SEGMENT_SIZE
#define SEGMENT_SIZE 2
#endif

void cudaTest(cudaError_t error) {
    if (error != cudaSuccess) {
        printf("cuda returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
        exit (EXIT_FAILURE);
    }
}

void print(uint* host_data, uint n, uint m) {
    std::cout << "\n";
    for (uint i = 0; i < n; i++) {
        for (uint j = 0; j < m; j++) {
            std::cout << host_data[i * n + j] << "\t";
        }
        std::cout << "\n";
    }
}

//__global__ void bitonic_sort_step(uint *dev_values, int k, int p, int n) {
__global__ void block_sorting(uint *d_vec, int n) {
    uint idx = blockDim.x * blockIdx.x + threadIdx.x;
    uint idy = blockDim.y * blockIdx.y + threadIdx.y;

    __shared__ uint As[BLOCK_SIZE][BLOCK_SIZE];
    As[threadIdx.x][threadIdx.y] = d_vec[idx * n + idy];

    uint i = threadIdx.x;
    uint j = threadIdx.y;
    __syncthreads();

    for (int k = 2; k <= SEGMENT_SIZE; k <<= 1) { // sorting only segment size row
        for (int p = k >> 1; p > 0; p = p >> 1) {
            uint ixp = i ^ p;
            /* The threads with the lowest ids sort the array. */
            if (i < ixp) {
                //bool up = ((i & k) == 0); // sorting the entire matrix row
                bool up = ((i & k) == 0); // sorting only block size matrix row

                // Sort ascending or descending according to up value
                if ((As[j][i] > As[j][ixp]) == up) {
                    // exchange(i,ixj);
                    uint temp = As[j][i];
                    As[j][i] = As[j][ixp];
                    As[j][ixp] = temp;
                }
            }
            __syncthreads();
        }
    }

    d_vec[idx * n + idy] = As[threadIdx.x][threadIdx.y];
}

int main(int argc, char** argv) {
    uint num_of_elements;
    scanf("%d", &num_of_elements);
    int n = num_of_elements;
    int m = num_of_elements;
    uint mem_size = sizeof(int) * (n * m);
    uint *h_vec = (uint *) malloc(mem_size);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            scanf("%d", &h_vec[i * n + j]);
        }
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    uint *d_vec;
    cudaTest(cudaMalloc((void **) &d_vec, mem_size));

    for (int i = 0; i < EXECUTIONS; i++) {
        cudaTest(cudaMemcpy(d_vec, h_vec, mem_size, cudaMemcpyHostToDevice));

        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid((n - 1) / dimBlock.x + 1, (m - 1) / dimBlock.y + 1);

        cudaEventRecord(start);
        block_sorting<<<dimGrid, dimBlock>>>(d_vec, n);
        cudaEventRecord(stop);

        cudaError_t errSync = cudaGetLastError();
        cudaError_t errAsync = cudaDeviceSynchronize();
        if (errSync != cudaSuccess)
            printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
        if (errAsync != cudaSuccess)
            printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));

        if (ELAPSED_TIME == 1) {
            cudaEventSynchronize(stop);
            float milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            std::cout << milliseconds << "\n";
        }

        cudaDeviceSynchronize();
    }

    cudaMemcpy(h_vec, d_vec, mem_size, cudaMemcpyDeviceToHost);
    cudaFree(d_vec);

    if (ELAPSED_TIME != 1) {
        print(h_vec, n, m);
    }

    free(h_vec);
    return 0;
}

/*
 *
for (int p = 0; p < logn; p++) {
    for (int q = 0; q <= p; q++) {
        int d = 1 << (p-q);
        //for(int i = 0; i < n; i++) {
        bool up = ((col >> p) & 2) == 0;
        if ((col & d) == 0 && (As[row][col] > As[row][col | d]) == up) {
            int t = As[row][col];
            As[row][col] = As[row][col | d];
            As[row][col | d] = t;
        }
        // }
    }
}
*/
94729a8fcd962abbb58fa85508a971f45323b7bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __MRLIB_CU__ #define __MRLIB_CU__ #include "MarsInc.h" #include "map.cu" #include "reduce.cu" //---------------------------------------------- //Get default runtime configuration // //return: default spec //---------------------------------------------- Spec_t *GetDefaultSpec() { Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t)); if (NULL == spec) exit(-1); memset(spec, 0, sizeof(Spec_t)); return spec; } //-------------------------------------------------------- //Initiate map reduce spec //-------------------------------------------------------- void InitMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->dimBlockMap <= 0) g_spec->dimBlockMap = DEFAULT_DIMBLOCK; if (g_spec->dimBlockReduce <= 0) g_spec->dimBlockReduce = DEFAULT_DIMBLOCK; if (g_spec->numRecTaskReduce <= 0) g_spec->numRecTaskReduce = DEFAULT_NUMTASK; if (g_spec->numRecTaskMap <= 0) g_spec->numRecTaskMap = DEFAULT_NUMTASK; if (g_spec->workflow <= 0) g_spec->workflow = MAP_ONLY; } //-------------------------------------------------- //Add a map input record // //param : spec //param : key -- a pointer to a buffer //param : val -- a pointer to a buffer //param : keySize //param : valSize //-------------------------------------------------- void AddMapInputRecord(Spec_t* spec, void* key, void* val, int keySize, int valSize) { assert(NULL != spec); static int2 curOffset; static int3 curChunkNum; int index = spec->inputRecordCount; const int dataChunkSize = 1024*1024*256; if (spec->inputRecordCount > 0) { if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize)) spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize); memcpy(spec->inputKeys+curOffset.x, key, keySize); if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize)) spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize); memcpy(spec->inputVals+curOffset.y, val, valSize); if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4)) spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes, (++curChunkNum.z)*dataChunkSize); } else { spec->inputKeys = (char*)malloc(dataChunkSize); if (NULL == spec->inputKeys) exit(-1); memcpy(spec->inputKeys, key, keySize); spec->inputVals = (char*)malloc(dataChunkSize); if (NULL == spec->inputVals) exit(-1); memcpy(spec->inputVals, val, valSize); spec->inputOffsetSizes = (int4*)malloc(dataChunkSize); curChunkNum.x++; curChunkNum.y++; curChunkNum.z++; } spec->inputOffsetSizes[index].x = curOffset.x; spec->inputOffsetSizes[index].y = keySize; spec->inputOffsetSizes[index].z = curOffset.y; spec->inputOffsetSizes[index].w = valSize; curOffset.x += keySize; curOffset.y += valSize; spec->inputRecordCount++; } //------------------------------------------------- //Called by user defined map_count function // //param : keySize //param : valSize //param : interKeysSizePerTask //param : interValsSizePerTask //param : interCountPerTask //------------------------------------------------- __device__ void EmitInterCount(int keySize, int valSize, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask) { int index = TID; interKeysSizePerTask[index] += keySize; interValsSizePerTask[index] += valSize; interCountPerTask[index]++; } //------------------------------------------------- //called by user defined map function // //------------------------------------------------- __device__ void EmitIntermediate(void* key, void* val, int keySize, int valSize, int* 
psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; int2 l_keyValOffsets = keyValOffsets[index]; char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x); char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y); char* sKey = (char*)key; char* sVal = (char*)val; for (int i = 0; i < keySize; ++i) pKeySet[i] = sKey[i]; for (int i = 0; i < valSize; ++i) pValSet[i] = sVal[i]; l_keyValOffsets.x += keySize; l_keyValOffsets.y += valSize; keyValOffsets[index] = l_keyValOffsets; int l_curIndex = curIndex[index]; int l_psCounts = psCounts[index]; int l_curPs = l_curIndex + l_psCounts; int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs]; int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1]; if (l_curIndex != 0) { l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y); l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w); } l_interOffsetSizes1.y = keySize; l_interOffsetSizes1.w = valSize; interOffsetSizes[l_curPs] = l_interOffsetSizes1; ++l_curIndex; curIndex[index] = l_curIndex; } //------------------------------------------------- //Calculate intermediate data's size // //param : inputKeys //param : inputVals //param : inputOffsetSizes //param : interKeysSizesPerTask //param : interValsSizePerTask //param : interCountPerTask //param : recordNum -- total number of records //param : recordsPerTask //------------------------------------------------- __global__ void MapperCount(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map_count(key, val, offsetSize.y, offsetSize.w, interKeysSizePerTask, interValsSizePerTask, interCountPerTask); } } //-------------------------------------------------- //mapper //-------------------------------------------------- __global__ void Mapper(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; int l_psCounts = psCounts[index]; int4 l_interOffsetSizes = interOffsetSizes[l_psCounts]; l_interOffsetSizes.x = psKeySizes[index]; l_interOffsetSizes.z = psValSizes[index]; interOffsetSizes[l_psCounts] = l_interOffsetSizes; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map(key, val, offsetSize.y, offsetSize.w, psKeySizes, psValSizes, psCounts, 
keyValOffsets, interKeys, interVals, interOffsetSizes, curIndex); } } //-------------------------------------------------- //start map // //1, get map input data on host //2, upload map input data to device memory // (keys, vals, keyOffsets, valOffsets, keySizes, valSizes) //3, determine the number of threads to run //4, calculate intermediate data keys'buf size // and values' buf size //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //6, allocate intermediate memory on device memory //7, start map //8, free allocated memory //-------------------------------------------------- int startMap(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);} if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); } if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); } if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);} //------------------------------------------------------- //1, get map input data on host //------------------------------------------------------- int h_inputRecordCount = g_spec->inputRecordCount; int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x + g_spec->inputOffsetSizes[h_inputRecordCount-1].y; int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z + g_spec->inputOffsetSizes[h_inputRecordCount-1].w; char* h_inputKeys = g_spec->inputKeys; char* h_inputVals = g_spec->inputVals; int4* h_inputOffsetSizes = g_spec->inputOffsetSizes; DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount); //------------------------------------------------------- //2, upload map input data onto device memory //------------------------------------------------------- DoLog( "** Upload map input data onto device memory"); TimeVal_t uploadTv; startTimer(&uploadTv); char* d_inputKeys = NULL; char* d_inputVals = NULL; int4* d_inputOffsetSizes = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_inputKeys, h_inputKeysBufSize)); CUDA_SAFE_CALL(hipMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMalloc((void**)&d_inputVals, h_inputValsBufSize)); CUDA_SAFE_CALL(hipMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount)); hipMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, hipMemcpyHostToDevice); endTimer("PCI-E I/O", &uploadTv); //---------------------------------------------- //3, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockMap,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskMap; int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; TimeVal_t mapTimer; startTimer(&mapTimer); //---------------------------------------------- //4, calculate intermediate data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** MapCount"); int* d_interKeysSizePerTask = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads)); 
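// d_interKeysSizePerTask, d_interValsSizePerTask and d_interCountPerTask hold one counter per
// mapper thread: MapperCount (via the user-supplied map_count/EmitInterCount) accumulates the
// emitted key bytes, value bytes and record count for each thread, and the prefix sums below
// turn those counts into per-thread write offsets and the total intermediate buffer sizes.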
hipMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interValsSizePerTask = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads)); hipMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interCountPerTask = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads)); hipMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads); hipLaunchKernelGGL(( MapperCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_inputKeys, d_inputVals, d_inputOffsetSizes, d_interKeysSizePerTask, d_interValsSizePerTask, d_interCountPerTask, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); //----------------------------------------------- //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on intermediate data's size\n"); int *d_psKeySizes = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads)); int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads)); int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads)); int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts); if (h_allCounts == 0) { DoLog( "** No output."); hipFree(d_inputKeys); hipFree(d_inputVals); hipFree(d_inputOffsetSizes); hipFree(d_psKeySizes); hipFree(d_psValSizes); hipFree(d_psCounts); endTimer("Map", &mapTimer); return 1; } //----------------------------------------------- //6, allocate intermediate memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_interKeys = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_interKeys, h_allKeySize)); hipMemset(d_interKeys, 0, h_allKeySize); char* d_interVals = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_interVals, h_allValSize)); hipMemset(d_interVals, 0, h_allValSize); int4* d_interOffsetSizes = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts)); hipMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts); //-------------------------------------------------- //7, start map //-------------------------------------------------- DoLog( "** Map"); int2* d_keyValOffsets = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads)); hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads)); hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; hipLaunchKernelGGL(( Mapper), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_inputKeys, d_inputVals, d_inputOffsetSizes, d_psKeySizes, d_psValSizes, d_psCounts, d_keyValOffsets, d_interKeys, d_interVals, d_interOffsetSizes, d_curIndex, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); g_spec->interKeys = d_interKeys; 
g_spec->interVals = d_interVals; g_spec->interOffsetSizes = d_interOffsetSizes; g_spec->interRecordCount = h_allCounts; g_spec->interDiffKeyCount = h_allCounts; g_spec->interAllKeySize = h_allKeySize; g_spec->interAllValSize = h_allValSize; //---------------------------------------------- //8, free //---------------------------------------------- hipFree(d_interKeysSizePerTask); hipFree(d_interValsSizePerTask); hipFree(d_interCountPerTask); hipFree(d_keyValOffsets); hipFree(d_curIndex); hipFree(d_inputKeys); hipFree(d_inputVals); hipFree(d_inputOffsetSizes); hipFree(d_psKeySizes); hipFree(d_psValSizes); hipFree(d_psCounts); endTimer("Map", &mapTimer); return 0; } void startGroup(Spec_t* spec) { Spec_t* g_spec = spec; int interDiffKeyCount = 0; char* d_outputKeys = NULL; char* d_outputVals = NULL; int4* d_outputOffsetSizes = NULL; int2** h_outputKeyListRange = NULL; DoLog( "** Sort for group"); CUDA_SAFE_CALL(hipMalloc((void**)&d_outputKeys, g_spec->interAllKeySize)); CUDA_SAFE_CALL(hipMalloc((void**)&d_outputVals, g_spec->interAllValSize)); CUDA_SAFE_CALL(hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount)); h_outputKeyListRange = (int2**)malloc(sizeof(int2*)); saven_initialPrefixSum(g_spec->interRecordCount); interDiffKeyCount = sort_GPU (g_spec->interKeys, g_spec->interAllKeySize, g_spec->interVals, g_spec->interAllValSize, g_spec->interOffsetSizes, g_spec->interRecordCount, d_outputKeys, d_outputVals, d_outputOffsetSizes, h_outputKeyListRange); DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount); g_spec->interKeys = d_outputKeys; g_spec->interVals = d_outputVals; g_spec->interOffsetSizes = d_outputOffsetSizes; g_spec->interDiffKeyCount = interDiffKeyCount; int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2); CUDA_SAFE_CALL(hipMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize)); CUDA_SAFE_CALL(hipMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, hipMemcpyHostToDevice)); free(*h_outputKeyListRange); free(h_outputKeyListRange); } //-------------------------------------------------------- //get a value from value list of the same key // //param : vals //param : interOffsetSizes //param : index //return: the wanted value //-------------------------------------------------------- __device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)vals + keyIndex * offset.w); } __device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)key + keyIndex * offset.y); } //--------------------------------------------------------- //called by user defined reduce_count function //--------------------------------------------------------- __device__ void EmitCount(int keySize, int valSize, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask) { int index = TID; outputKeysSizePerTask[index] += keySize; outputValsSizePerTask[index] += valSize; outputCountPerTask[index]++; } //--------------------------------------------------------- //called by user defined reduce function //--------------------------------------------------------- __device__ void Emit (char* key, char* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int* curIndex) { #ifndef 
__DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x); char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y); for (int i = 0; i < keySize; i++) pKeySet[i] = key[i]; for (int i = 0; i < valSize; i++) pValSet[i] = val[i]; keyValOffsets[index].x += keySize; keyValOffsets[index].y += valSize; if (curIndex[index] != 0) { outputOffsetSizes[psCounts[index] + curIndex[index]].x = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y); outputOffsetSizes[psCounts[index] + curIndex[index]].z = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w); } outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize; outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize; curIndex[index]++; } //------------------------------------------------------- //calculate output data's size //------------------------------------------------------- __global__ void ReducerCount(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; //for (int i = 0; i <= recordsPerTask; i++) for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce_count(key, vals, keySize, valCount, interOffsetSizes, outputKeysSizePerTask, outputValsSizePerTask, outputCountPerTask); } } //------------------------------------------------------- //Reducer // //------------------------------------------------------- __global__ void Reducer(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* psKeySizes, int* psValSizes, int* psCounts, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int2* keyValOffsets, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; outputOffsetSizes[psCounts[index]].x = psKeySizes[index]; outputOffsetSizes[psCounts[index]].z = psValSizes[index]; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce(key, vals, keySize, valCount, psKeySizes, psValSizes, psCounts, keyValOffsets, interOffsetSizes, outputKeys, outputVals, outputOffsetSizes, curIndex, valStartIndex); } } 
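//-------------------------------------------------
//(added note) index conventions inferred from the code in this file,
//not from separate documentation:
// * each record is described by an int4 in the *OffsetSizes arrays:
//   .x = byte offset of the key in the key buffer, .y = key size,
//   .z = byte offset of the value in the value buffer, .w = value size
// * interKeyListRange holds one int2 per distinct key after grouping:
//   .x = index of the group's first record, .y = one past its last,
//   so (.y - .x) is the number of values reduced for that key
// * keyValOffsets holds one int2 per thread: the running key/value byte
//   offsets inside the slice reserved for that thread by the
//   psKeySizes/psValSizes prefix sums
//GetKey/GetVal above step through a group as keyIndex * offset.y and
//keyIndex * offset.w, i.e. they assume fixed-size keys and values
//within a group.
//-------------------------------------------------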
//---------------------------------------------- //start reduce // //1, if there is not a reduce phase, just return // then user uses spec->interKeys/spec->intervals // for further processing //2, get reduce input data on host //3, upload reduce input data onto device memory //4, determine the number of threads to run //5, calculate output data keys'buf size // and values' buf size //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //7, allocate output memory on device memory //8, start reduce //9, copy output data to Spect_t structure //10,free allocated memory //---------------------------------------------- void startReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);} if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);} if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);} if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);} if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);} if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);} //------------------------------------------------------- //2, get reduce input data on host //------------------------------------------------------- int h_interDiffKeyCount = g_spec->interDiffKeyCount; char* d_interKeys = g_spec->interKeys; char* d_interVals = g_spec->interVals; int4* d_interOffsetSizes = g_spec->interOffsetSizes; int2* d_interKeyListRange = g_spec->interKeyListRange; //---------------------------------------------- //4, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockReduce,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskReduce; int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; //---------------------------------------------- //5, calculate output data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** ReduceCount"); int* d_outputKeysSizePerTask = NULL; hipMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads); hipMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputValsSizePerTask = NULL; hipMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads); hipMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputCountPerTask = NULL; hipMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads); hipMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads); hipLaunchKernelGGL(( ReducerCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_outputKeysSizePerTask, d_outputValsSizePerTask, d_outputCountPerTask, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); //----------------------------------------------- //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on output data's size"); int *d_psKeySizes = NULL; hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads); 
hipMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads); int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads); hipMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads); int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads); hipMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads); int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts); //----------------------------------------------- //7, allocate output memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_outputKeys = NULL; hipMalloc((void**)&d_outputKeys, h_allKeySize); char* d_outputVals = NULL; hipMalloc((void**)&d_outputVals, h_allValSize); int4* d_outputOffsetSizes = NULL; hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts); //-------------------------------------------------- //8, start reduce //-------------------------------------------------- DoLog( "** Reduce"); int2* d_keyValOffsets = NULL; hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads); hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads); hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; hipLaunchKernelGGL(( Reducer), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_psKeySizes, d_psValSizes, d_psCounts, d_outputKeys, d_outputVals, d_outputOffsetSizes, d_keyValOffsets, d_curIndex, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); //------------------------------------------------------- //9, copy output data to Spec_t structure //------------------------------------------------------- g_spec->outputKeys = d_outputKeys; g_spec->outputVals = d_outputVals; g_spec->outputOffsetSizes = d_outputOffsetSizes; g_spec->outputRecordCount = h_allCounts; g_spec->outputAllKeySize = h_allKeySize; g_spec->outputAllValSize = h_allValSize; //---------------------------------------------- //10, free allocated memory //---------------------------------------------- hipFree(d_interKeys); hipFree(d_interVals); hipFree(d_interOffsetSizes); hipFree(d_outputKeysSizePerTask); hipFree(d_outputValsSizePerTask); hipFree(d_outputCountPerTask); hipFree(d_psKeySizes); hipFree(d_psValSizes); hipFree(d_psCounts); hipFree(d_keyValOffsets); hipFree(d_curIndex); } //---------------------------------------------- //start main map reduce procedure //1, init device //2, start map //3, start reduce // //param : spec //---------------------------------------------- void MapReduce(Spec_t *spec) { assert(NULL != spec); Spec_t* g_spec = spec; DoLog( "=====start map/reduce====="); //------------------------------------------- //1, init device //------------------------------------------- //CUT_DEVICE_INIT(); DoLog( "** init GPU"); InitMapReduce(spec); //------------------------------------------- //2, start map //------------------------------------------- DoLog( 
"----------start map-----------"); if (startMap(spec)) { printf("** No output."); return; } if (g_spec->workflow == MAP_ONLY) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; goto EXIT_MR; } //------------------------------------------- //3, start group //------------------------------------------- DoLog( "----------start group-----------"); TimeVal_t groupTimer; startTimer(&groupTimer); startGroup(spec); endTimer("Group", &groupTimer); if (g_spec->workflow == MAP_GROUP) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount; if (g_spec->outputToHost == 1) { g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount); CUDA_SAFE_CALL(hipMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(g_spec->interKeyListRange)); } goto EXIT_MR; } //------------------------------------------- //4, start reduce //------------------------------------------- DoLog( "----------start reduce--------"); TimeVal_t reduceTimer; startTimer(&reduceTimer); startReduce(spec); endTimer("Reduce", &reduceTimer); EXIT_MR: if (g_spec->outputToHost == 1) { int indexSize = g_spec->outputRecordCount * sizeof(int4); char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize); if (h_outputKeys == NULL) exit(0); char* h_outputVals = (char*)malloc(g_spec->outputAllValSize); if (h_outputVals == NULL) exit(0); int4* h_outputOffsetSizes = (int4*)malloc(indexSize); if (h_outputOffsetSizes == NULL) exit(0); CUDA_SAFE_CALL(hipMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(g_spec->outputKeys)); CUDA_SAFE_CALL(hipFree(g_spec->outputVals)); CUDA_SAFE_CALL(hipFree(g_spec->outputOffsetSizes)); g_spec->outputKeys = h_outputKeys; g_spec->outputVals = h_outputVals; g_spec->outputOffsetSizes = h_outputOffsetSizes; } } //------------------------------------------ //the last step // //1, free global variables' memory //2, close log file's file pointer //------------------------------------------ void FinishMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; //------------------------------------------- //1, free global variables' memory //------------------------------------------- free(g_spec->inputKeys); free(g_spec->inputVals); free(g_spec->inputOffsetSizes); if (g_spec->outputToHost == 1) { free(g_spec->outputKeys); free(g_spec->outputVals); free(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) free(g_spec->outputKeyListRange); } else { hipFree(g_spec->outputKeys); hipFree(g_spec->outputVals); hipFree(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) hipFree(g_spec->outputKeyListRange); } free(g_spec); DoLog( "=====finish map/reduce====="); } #endif //__MRLIB_CU__
94729a8fcd962abbb58fa85508a971f45323b7bd.cu
#ifndef __MRLIB_CU__ #define __MRLIB_CU__ #include "MarsInc.h" #include "map.cu" #include "reduce.cu" //---------------------------------------------- //Get default runtime configuration // //return: default spec //---------------------------------------------- Spec_t *GetDefaultSpec() { Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t)); if (NULL == spec) exit(-1); memset(spec, 0, sizeof(Spec_t)); return spec; } //-------------------------------------------------------- //Initiate map reduce spec //-------------------------------------------------------- void InitMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->dimBlockMap <= 0) g_spec->dimBlockMap = DEFAULT_DIMBLOCK; if (g_spec->dimBlockReduce <= 0) g_spec->dimBlockReduce = DEFAULT_DIMBLOCK; if (g_spec->numRecTaskReduce <= 0) g_spec->numRecTaskReduce = DEFAULT_NUMTASK; if (g_spec->numRecTaskMap <= 0) g_spec->numRecTaskMap = DEFAULT_NUMTASK; if (g_spec->workflow <= 0) g_spec->workflow = MAP_ONLY; } //-------------------------------------------------- //Add a map input record // //param : spec //param : key -- a pointer to a buffer //param : val -- a pointer to a buffer //param : keySize //param : valSize //-------------------------------------------------- void AddMapInputRecord(Spec_t* spec, void* key, void* val, int keySize, int valSize) { assert(NULL != spec); static int2 curOffset; static int3 curChunkNum; int index = spec->inputRecordCount; const int dataChunkSize = 1024*1024*256; if (spec->inputRecordCount > 0) { if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize)) spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize); memcpy(spec->inputKeys+curOffset.x, key, keySize); if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize)) spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize); memcpy(spec->inputVals+curOffset.y, val, valSize); if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4)) spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes, (++curChunkNum.z)*dataChunkSize); } else { spec->inputKeys = (char*)malloc(dataChunkSize); if (NULL == spec->inputKeys) exit(-1); memcpy(spec->inputKeys, key, keySize); spec->inputVals = (char*)malloc(dataChunkSize); if (NULL == spec->inputVals) exit(-1); memcpy(spec->inputVals, val, valSize); spec->inputOffsetSizes = (int4*)malloc(dataChunkSize); curChunkNum.x++; curChunkNum.y++; curChunkNum.z++; } spec->inputOffsetSizes[index].x = curOffset.x; spec->inputOffsetSizes[index].y = keySize; spec->inputOffsetSizes[index].z = curOffset.y; spec->inputOffsetSizes[index].w = valSize; curOffset.x += keySize; curOffset.y += valSize; spec->inputRecordCount++; } //------------------------------------------------- //Called by user defined map_count function // //param : keySize //param : valSize //param : interKeysSizePerTask //param : interValsSizePerTask //param : interCountPerTask //------------------------------------------------- __device__ void EmitInterCount(int keySize, int valSize, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask) { int index = TID; interKeysSizePerTask[index] += keySize; interValsSizePerTask[index] += valSize; interCountPerTask[index]++; } //------------------------------------------------- //called by user defined map function // //------------------------------------------------- __device__ void EmitIntermediate(void* key, void* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* 
interVals, int4* interOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; int2 l_keyValOffsets = keyValOffsets[index]; char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x); char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y); char* sKey = (char*)key; char* sVal = (char*)val; for (int i = 0; i < keySize; ++i) pKeySet[i] = sKey[i]; for (int i = 0; i < valSize; ++i) pValSet[i] = sVal[i]; l_keyValOffsets.x += keySize; l_keyValOffsets.y += valSize; keyValOffsets[index] = l_keyValOffsets; int l_curIndex = curIndex[index]; int l_psCounts = psCounts[index]; int l_curPs = l_curIndex + l_psCounts; int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs]; int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1]; if (l_curIndex != 0) { l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y); l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w); } l_interOffsetSizes1.y = keySize; l_interOffsetSizes1.w = valSize; interOffsetSizes[l_curPs] = l_interOffsetSizes1; ++l_curIndex; curIndex[index] = l_curIndex; } //------------------------------------------------- //Calculate intermediate data's size // //param : inputKeys //param : inputVals //param : inputOffsetSizes //param : interKeysSizesPerTask //param : interValsSizePerTask //param : interCountPerTask //param : recordNum -- total number of records //param : recordsPerTask //------------------------------------------------- __global__ void MapperCount(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map_count(key, val, offsetSize.y, offsetSize.w, interKeysSizePerTask, interValsSizePerTask, interCountPerTask); } } //-------------------------------------------------- //mapper //-------------------------------------------------- __global__ void Mapper(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; int l_psCounts = psCounts[index]; int4 l_interOffsetSizes = interOffsetSizes[l_psCounts]; l_interOffsetSizes.x = psKeySizes[index]; l_interOffsetSizes.z = psValSizes[index]; interOffsetSizes[l_psCounts] = l_interOffsetSizes; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map(key, val, offsetSize.y, offsetSize.w, psKeySizes, psValSizes, psCounts, keyValOffsets, interKeys, interVals, interOffsetSizes, curIndex); } } 
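//--------------------------------------------------
//(added note) how the two map passes above fit together, as inferred
//from this file: MapperCount runs the user's map_count(), which calls
//EmitInterCount() to accumulate per-thread key bytes, value bytes and
//record counts; startMap() below prefix-sums those three arrays
//(prefexSum) into psKeySizes/psValSizes/psCounts, sizes the
//intermediate buffers from the totals, and only then launches Mapper,
//whose user map() calls EmitIntermediate() to write each record into
//the slice reserved for its thread -- so the emit path needs no atomics.
//--------------------------------------------------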
//-------------------------------------------------- //start map // //1, get map input data on host //2, upload map input data to device memory // (keys, vals, keyOffsets, valOffsets, keySizes, valSizes) //3, determine the number of threads to run //4, calculate intermediate data keys'buf size // and values' buf size //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //6, allocate intermediate memory on device memory //7, start map //8, free allocated memory //-------------------------------------------------- int startMap(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);} if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); } if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); } if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);} //------------------------------------------------------- //1, get map input data on host //------------------------------------------------------- int h_inputRecordCount = g_spec->inputRecordCount; int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x + g_spec->inputOffsetSizes[h_inputRecordCount-1].y; int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z + g_spec->inputOffsetSizes[h_inputRecordCount-1].w; char* h_inputKeys = g_spec->inputKeys; char* h_inputVals = g_spec->inputVals; int4* h_inputOffsetSizes = g_spec->inputOffsetSizes; DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount); //------------------------------------------------------- //2, upload map input data onto device memory //------------------------------------------------------- DoLog( "** Upload map input data onto device memory"); TimeVal_t uploadTv; startTimer(&uploadTv); char* d_inputKeys = NULL; char* d_inputVals = NULL; int4* d_inputOffsetSizes = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputKeys, h_inputKeysBufSize)); CUDA_SAFE_CALL(cudaMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputVals, h_inputValsBufSize)); CUDA_SAFE_CALL(cudaMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount)); cudaMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, cudaMemcpyHostToDevice); endTimer("PCI-E I/O", &uploadTv); //---------------------------------------------- //3, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockMap,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskMap; int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; TimeVal_t mapTimer; startTimer(&mapTimer); //---------------------------------------------- //4, calculate intermediate data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** MapCount"); int* d_interKeysSizePerTask = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads)); cudaMemset(d_interKeysSizePerTask, 0, 
sizeof(int)*h_actualNumThreads); int* d_interValsSizePerTask = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads)); cudaMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interCountPerTask = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads)); cudaMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads); MapperCount<<<h_dimGrid, h_dimBlock>>>(d_inputKeys, d_inputVals, d_inputOffsetSizes, d_interKeysSizePerTask, d_interValsSizePerTask, d_interCountPerTask, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); //----------------------------------------------- //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on intermediate data's size\n"); int *d_psKeySizes = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads)); int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads)); int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads)); int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts); if (h_allCounts == 0) { DoLog( "** No output."); cudaFree(d_inputKeys); cudaFree(d_inputVals); cudaFree(d_inputOffsetSizes); cudaFree(d_psKeySizes); cudaFree(d_psValSizes); cudaFree(d_psCounts); endTimer("Map", &mapTimer); return 1; } //----------------------------------------------- //6, allocate intermediate memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_interKeys = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_interKeys, h_allKeySize)); cudaMemset(d_interKeys, 0, h_allKeySize); char* d_interVals = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_interVals, h_allValSize)); cudaMemset(d_interVals, 0, h_allValSize); int4* d_interOffsetSizes = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts)); cudaMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts); //-------------------------------------------------- //7, start map //-------------------------------------------------- DoLog( "** Map"); int2* d_keyValOffsets = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads)); cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads)); cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; Mapper<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_inputKeys, d_inputVals, d_inputOffsetSizes, d_psKeySizes, d_psValSizes, d_psCounts, d_keyValOffsets, d_interKeys, d_interVals, d_interOffsetSizes, d_curIndex, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); g_spec->interKeys = d_interKeys; g_spec->interVals = d_interVals; g_spec->interOffsetSizes = d_interOffsetSizes; 
g_spec->interRecordCount = h_allCounts; g_spec->interDiffKeyCount = h_allCounts; g_spec->interAllKeySize = h_allKeySize; g_spec->interAllValSize = h_allValSize; //---------------------------------------------- //8, free //---------------------------------------------- cudaFree(d_interKeysSizePerTask); cudaFree(d_interValsSizePerTask); cudaFree(d_interCountPerTask); cudaFree(d_keyValOffsets); cudaFree(d_curIndex); cudaFree(d_inputKeys); cudaFree(d_inputVals); cudaFree(d_inputOffsetSizes); cudaFree(d_psKeySizes); cudaFree(d_psValSizes); cudaFree(d_psCounts); endTimer("Map", &mapTimer); return 0; } void startGroup(Spec_t* spec) { Spec_t* g_spec = spec; int interDiffKeyCount = 0; char* d_outputKeys = NULL; char* d_outputVals = NULL; int4* d_outputOffsetSizes = NULL; int2** h_outputKeyListRange = NULL; DoLog( "** Sort for group"); CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputKeys, g_spec->interAllKeySize)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputVals, g_spec->interAllValSize)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount)); h_outputKeyListRange = (int2**)malloc(sizeof(int2*)); saven_initialPrefixSum(g_spec->interRecordCount); interDiffKeyCount = sort_GPU (g_spec->interKeys, g_spec->interAllKeySize, g_spec->interVals, g_spec->interAllValSize, g_spec->interOffsetSizes, g_spec->interRecordCount, d_outputKeys, d_outputVals, d_outputOffsetSizes, h_outputKeyListRange); DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount); g_spec->interKeys = d_outputKeys; g_spec->interVals = d_outputVals; g_spec->interOffsetSizes = d_outputOffsetSizes; g_spec->interDiffKeyCount = interDiffKeyCount; int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2); CUDA_SAFE_CALL(cudaMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize)); CUDA_SAFE_CALL(cudaMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, cudaMemcpyHostToDevice)); free(*h_outputKeyListRange); free(h_outputKeyListRange); } //-------------------------------------------------------- //get a value from value list of the same key // //param : vals //param : interOffsetSizes //param : index //return: the wanted value //-------------------------------------------------------- __device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)vals + keyIndex * offset.w); } __device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)key + keyIndex * offset.y); } //--------------------------------------------------------- //called by user defined reduce_count function //--------------------------------------------------------- __device__ void EmitCount(int keySize, int valSize, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask) { int index = TID; outputKeysSizePerTask[index] += keySize; outputValsSizePerTask[index] += valSize; outputCountPerTask[index]++; } //--------------------------------------------------------- //called by user defined reduce function //--------------------------------------------------------- __device__ void Emit (char* key, char* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; char 
*pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x); char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y); for (int i = 0; i < keySize; i++) pKeySet[i] = key[i]; for (int i = 0; i < valSize; i++) pValSet[i] = val[i]; keyValOffsets[index].x += keySize; keyValOffsets[index].y += valSize; if (curIndex[index] != 0) { outputOffsetSizes[psCounts[index] + curIndex[index]].x = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y); outputOffsetSizes[psCounts[index] + curIndex[index]].z = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w); } outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize; outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize; curIndex[index]++; } //------------------------------------------------------- //calculate output data's size //------------------------------------------------------- __global__ void ReducerCount(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; //for (int i = 0; i <= recordsPerTask; i++) for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce_count(key, vals, keySize, valCount, interOffsetSizes, outputKeysSizePerTask, outputValsSizePerTask, outputCountPerTask); } } //------------------------------------------------------- //Reducer // //------------------------------------------------------- __global__ void Reducer(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* psKeySizes, int* psValSizes, int* psCounts, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int2* keyValOffsets, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; outputOffsetSizes[psCounts[index]].x = psKeySizes[index]; outputOffsetSizes[psCounts[index]].z = psValSizes[index]; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce(key, vals, keySize, valCount, psKeySizes, psValSizes, psCounts, keyValOffsets, interOffsetSizes, outputKeys, outputVals, outputOffsetSizes, curIndex, valStartIndex); } } //---------------------------------------------- //start reduce // //1, if there 
is not a reduce phase, just return // then user uses spec->interKeys/spec->intervals // for further processing //2, get reduce input data on host //3, upload reduce input data onto device memory //4, determine the number of threads to run //5, calculate output data keys'buf size // and values' buf size //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //7, allocate output memory on device memory //8, start reduce //9, copy output data to Spect_t structure //10,free allocated memory //---------------------------------------------- void startReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);} if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);} if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);} if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);} if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);} if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);} //------------------------------------------------------- //2, get reduce input data on host //------------------------------------------------------- int h_interDiffKeyCount = g_spec->interDiffKeyCount; char* d_interKeys = g_spec->interKeys; char* d_interVals = g_spec->interVals; int4* d_interOffsetSizes = g_spec->interOffsetSizes; int2* d_interKeyListRange = g_spec->interKeyListRange; //---------------------------------------------- //4, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockReduce,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskReduce; int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; //---------------------------------------------- //5, calculate output data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** ReduceCount"); int* d_outputKeysSizePerTask = NULL; cudaMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads); cudaMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputValsSizePerTask = NULL; cudaMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads); cudaMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputCountPerTask = NULL; cudaMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads); cudaMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads); ReducerCount<<<h_dimGrid, h_dimBlock>>>(d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_outputKeysSizePerTask, d_outputValsSizePerTask, d_outputCountPerTask, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); //----------------------------------------------- //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on output data's size"); int *d_psKeySizes = NULL; cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads); cudaMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads); int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, 
(int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads); cudaMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads); int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads); cudaMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads); int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts); //----------------------------------------------- //7, allocate output memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_outputKeys = NULL; cudaMalloc((void**)&d_outputKeys, h_allKeySize); char* d_outputVals = NULL; cudaMalloc((void**)&d_outputVals, h_allValSize); int4* d_outputOffsetSizes = NULL; cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts); //-------------------------------------------------- //8, start reduce //-------------------------------------------------- DoLog( "** Reduce"); int2* d_keyValOffsets = NULL; cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads); cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads); cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; Reducer<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_psKeySizes, d_psValSizes, d_psCounts, d_outputKeys, d_outputVals, d_outputOffsetSizes, d_keyValOffsets, d_curIndex, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); //------------------------------------------------------- //9, copy output data to Spec_t structure //------------------------------------------------------- g_spec->outputKeys = d_outputKeys; g_spec->outputVals = d_outputVals; g_spec->outputOffsetSizes = d_outputOffsetSizes; g_spec->outputRecordCount = h_allCounts; g_spec->outputAllKeySize = h_allKeySize; g_spec->outputAllValSize = h_allValSize; //---------------------------------------------- //10, free allocated memory //---------------------------------------------- cudaFree(d_interKeys); cudaFree(d_interVals); cudaFree(d_interOffsetSizes); cudaFree(d_outputKeysSizePerTask); cudaFree(d_outputValsSizePerTask); cudaFree(d_outputCountPerTask); cudaFree(d_psKeySizes); cudaFree(d_psValSizes); cudaFree(d_psCounts); cudaFree(d_keyValOffsets); cudaFree(d_curIndex); } //---------------------------------------------- //start main map reduce procedure //1, init device //2, start map //3, start reduce // //param : spec //---------------------------------------------- void MapReduce(Spec_t *spec) { assert(NULL != spec); Spec_t* g_spec = spec; DoLog( "=====start map/reduce====="); //------------------------------------------- //1, init device //------------------------------------------- //CUT_DEVICE_INIT(); DoLog( "** init GPU"); InitMapReduce(spec); //------------------------------------------- //2, start map //------------------------------------------- DoLog( "----------start map-----------"); if (startMap(spec)) { printf("** No output."); return; } if (g_spec->workflow == MAP_ONLY) { 
g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; goto EXIT_MR; } //------------------------------------------- //3, start group //------------------------------------------- DoLog( "----------start group-----------"); TimeVal_t groupTimer; startTimer(&groupTimer); startGroup(spec); endTimer("Group", &groupTimer); if (g_spec->workflow == MAP_GROUP) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount; if (g_spec->outputToHost == 1) { g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount); CUDA_SAFE_CALL(cudaMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(g_spec->interKeyListRange)); } goto EXIT_MR; } //------------------------------------------- //4, start reduce //------------------------------------------- DoLog( "----------start reduce--------"); TimeVal_t reduceTimer; startTimer(&reduceTimer); startReduce(spec); endTimer("Reduce", &reduceTimer); EXIT_MR: if (g_spec->outputToHost == 1) { int indexSize = g_spec->outputRecordCount * sizeof(int4); char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize); if (h_outputKeys == NULL) exit(0); char* h_outputVals = (char*)malloc(g_spec->outputAllValSize); if (h_outputVals == NULL) exit(0); int4* h_outputOffsetSizes = (int4*)malloc(indexSize); if (h_outputOffsetSizes == NULL) exit(0); CUDA_SAFE_CALL(cudaMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(g_spec->outputKeys)); CUDA_SAFE_CALL(cudaFree(g_spec->outputVals)); CUDA_SAFE_CALL(cudaFree(g_spec->outputOffsetSizes)); g_spec->outputKeys = h_outputKeys; g_spec->outputVals = h_outputVals; g_spec->outputOffsetSizes = h_outputOffsetSizes; } } //------------------------------------------ //the last step // //1, free global variables' memory //2, close log file's file pointer //------------------------------------------ void FinishMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; //------------------------------------------- //1, free global variables' memory //------------------------------------------- free(g_spec->inputKeys); free(g_spec->inputVals); free(g_spec->inputOffsetSizes); if (g_spec->outputToHost == 1) { free(g_spec->outputKeys); free(g_spec->outputVals); free(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) free(g_spec->outputKeyListRange); } else { cudaFree(g_spec->outputKeys); cudaFree(g_spec->outputVals); cudaFree(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) cudaFree(g_spec->outputKeyListRange); } free(g_spec); DoLog( "=====finish map/reduce====="); } #endif //__MRLIB_CU__
8df892bbdbb5ff0a5d22223ccb73bea8c7e3a26d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <list> #define SIGN(x) (x >= 1) ? (short)1 : (short)0 #define NUM_STATES (1<<18)//23 maior que 17 da erro no vetor h_transients #define NUM_COPYS (1<<20)//22 10 #define NUM_NOS 96 #define N 3 using namespace std; //typedef unsigned long uint64; typedef unsigned int uint32; __device__ bool comp(uint32 *S0, uint32 *S1){ for (int i = 0; i < N; ++i) { if(S1[i] != S0[i]) return false; } return true; } __device__ short getDecValue(short v){ return v? v:(short)-1; } __device__ short getBit(int idx,uint32 v){ idx = idx % 32; return (short)((v >> idx) & 1); } __device__ void setBit(int idx, short newV, uint32 *v){ idx = idx % 32; *v &= ~(1 << idx); *v |= ((uint32)newV << idx); } __device__ short getBlockIdx(int idx){ if(idx<32) return 0; else if(idx>63) return 2; else return 1; } __device__ void pass(uint32 *S) { uint32 Sc[N]; for (int i = 0; i < N; ++i) { Sc[i] = S[i]; } setBit(0,1,&S[getBlockIdx(0)]); setBit(1,1,&S[getBlockIdx(1)]); setBit(2,0,&S[getBlockIdx(2)]); setBit(3,0,&S[getBlockIdx(3)]); setBit(4,0,&S[getBlockIdx(4)]); setBit(5,0,&S[getBlockIdx(5)]); setBit(6,1,&S[getBlockIdx(6)]); setBit(7,1,&S[getBlockIdx(7)]); setBit(8,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(8)]); setBit(9,SIGN( + getDecValue(getBit(0,Sc[getBlockIdx(0)])) + getDecValue(getBit(71,Sc[getBlockIdx(71)]))),&S[getBlockIdx(9)]); setBit(10,SIGN( + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + getDecValue(getBit(60,Sc[getBlockIdx(60)])) + (short)(-1)),&S[getBlockIdx(10)]); setBit(11,SIGN( - (getDecValue(getBit(2,Sc[getBlockIdx(2)]))) + (short) 1),&S[getBlockIdx(11)]); setBit(12,SIGN( - (getDecValue(getBit(13,Sc[getBlockIdx(13)]))) + (short) 1),&S[getBlockIdx(12)]); setBit(13,SIGN( + getDecValue(getBit(14,Sc[getBlockIdx(14)])) + getDecValue(getBit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(13)]); setBit(14,SIGN( + getDecValue(getBit(1,Sc[getBlockIdx(1)]))),&S[getBlockIdx(14)]); setBit(15,SIGN( + getDecValue(getBit(2,Sc[getBlockIdx(2)])) - (getDecValue(getBit(4,Sc[getBlockIdx(4)])))),&S[getBlockIdx(15)]); setBit(16,SIGN( - (getDecValue(getBit(12,Sc[getBlockIdx(12)]))) + getDecValue(getBit(14,Sc[getBlockIdx(14)])) + (short) 1),&S[getBlockIdx(16)]); setBit(17,SIGN( + getDecValue(getBit(16,Sc[getBlockIdx(16)])) + getDecValue(getBit(78,Sc[getBlockIdx(78)]))),&S[getBlockIdx(17)]); setBit(18,SIGN( + getDecValue(getBit(17,Sc[getBlockIdx(17)])) - (getDecValue(getBit(7,Sc[getBlockIdx(7)]))) - (getDecValue(getBit(92,Sc[getBlockIdx(92)]))) + (short) 1),&S[getBlockIdx(18)]); setBit(19,SIGN( + getDecValue(getBit(18,Sc[getBlockIdx(18)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(19)]); setBit(20,SIGN( + getDecValue(getBit(13,Sc[getBlockIdx(13)])) + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(33,Sc[getBlockIdx(33)])) - (getDecValue(getBit(37,Sc[getBlockIdx(37)]))) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + getDecValue(getBit(82,Sc[getBlockIdx(82)]))),&S[getBlockIdx(20)]); setBit(21,SIGN( + getDecValue(getBit(18,Sc[getBlockIdx(18)])) + getDecValue(getBit(20,Sc[getBlockIdx(20)])) + getDecValue(getBit(20,Sc[getBlockIdx(20)])) - (getDecValue(getBit(76,Sc[getBlockIdx(76)]))) + getDecValue(getBit(95,Sc[getBlockIdx(95)])) + (short)(-1)),&S[getBlockIdx(21)]); setBit(22,SIGN( + getDecValue(getBit(13,Sc[getBlockIdx(13)])) + 
getDecValue(getBit(16,Sc[getBlockIdx(16)]))),&S[getBlockIdx(22)]); setBit(23,SIGN( + getDecValue(getBit(22,Sc[getBlockIdx(22)]))),&S[getBlockIdx(23)]); setBit(24,SIGN( + getDecValue(getBit(19,Sc[getBlockIdx(19)])) + getDecValue(getBit(23,Sc[getBlockIdx(23)]))),&S[getBlockIdx(24)]); setBit(25,SIGN( + getDecValue(getBit(18,Sc[getBlockIdx(18)])) + getDecValue(getBit(19,Sc[getBlockIdx(19)])) + (short)(-1)),&S[getBlockIdx(25)]); setBit(26,SIGN( - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + getDecValue(getBit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(26)]); setBit(27,SIGN( + getDecValue(getBit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(27)]); setBit(28,SIGN( + getDecValue(getBit(7,Sc[getBlockIdx(7)])) + (short) 1),&S[getBlockIdx(28)]); setBit(29,SIGN( - (getDecValue(getBit(24,Sc[getBlockIdx(24)]))) - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) - (getDecValue(getBit(27,Sc[getBlockIdx(27)]))) - (getDecValue(getBit(33,Sc[getBlockIdx(33)]))) + (short)3),&S[getBlockIdx(29)]); setBit(30,SIGN( + getDecValue(getBit(28,Sc[getBlockIdx(28)])) + getDecValue(getBit(29,Sc[getBlockIdx(29)])) + (short)(-1)),&S[getBlockIdx(30)]); setBit(31,SIGN( - (getDecValue(getBit(30,Sc[getBlockIdx(30)]))) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + (short) 1),&S[getBlockIdx(31)]); setBit(32,SIGN( - (getDecValue(getBit(10,Sc[getBlockIdx(10)]))) + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(84,Sc[getBlockIdx(84)]))),&S[getBlockIdx(32)]); setBit(33,SIGN( + getDecValue(getBit(15,Sc[getBlockIdx(15)])) + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(42,Sc[getBlockIdx(42)])) - (getDecValue(getBit(72,Sc[getBlockIdx(72)]))) + (short)(-1)),&S[getBlockIdx(33)]); setBit(34,SIGN( + getDecValue(getBit(4,Sc[getBlockIdx(4)])) + getDecValue(getBit(33,Sc[getBlockIdx(33)])) - (getDecValue(getBit(36,Sc[getBlockIdx(36)]))) - (getDecValue(getBit(36,Sc[getBlockIdx(36)]))) - (getDecValue(getBit(37,Sc[getBlockIdx(37)]))) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(69,Sc[getBlockIdx(69)]))) + (short)2),&S[getBlockIdx(34)]); setBit(35,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(35)]); setBit(36,SIGN( - (getDecValue(getBit(4,Sc[getBlockIdx(4)]))) - (getDecValue(getBit(71,Sc[getBlockIdx(71)]))) + (short) 1),&S[getBlockIdx(36)]); setBit(37,SIGN( - (getDecValue(getBit(4,Sc[getBlockIdx(4)]))) + getDecValue(getBit(71,Sc[getBlockIdx(71)])) + (short) 1),&S[getBlockIdx(37)]); setBit(38,SIGN( - (getDecValue(getBit(8,Sc[getBlockIdx(8)]))) + getDecValue(getBit(39,Sc[getBlockIdx(39)])) + getDecValue(getBit(6,Sc[getBlockIdx(6)])) - (getDecValue(getBit(40,Sc[getBlockIdx(40)]))) - (getDecValue(getBit(63,Sc[getBlockIdx(63)]))) + (short)(-1)),&S[getBlockIdx(38)]); setBit(39,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(23,Sc[getBlockIdx(23)])) - (getDecValue(getBit(34,Sc[getBlockIdx(34)]))) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + getDecValue(getBit(70,Sc[getBlockIdx(70)])) + getDecValue(getBit(84,Sc[getBlockIdx(84)])) + getDecValue(getBit(5,Sc[getBlockIdx(5)])) + (short)(-1)),&S[getBlockIdx(39)]); setBit(40,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(40)]); setBit(41,SIGN( - (getDecValue(getBit(22,Sc[getBlockIdx(22)]))) - (getDecValue(getBit(23,Sc[getBlockIdx(23)]))) - (getDecValue(getBit(24,Sc[getBlockIdx(24)]))) - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + 
getDecValue(getBit(72,Sc[getBlockIdx(72)])) + (short) 1),&S[getBlockIdx(41)]); setBit(42,SIGN( - (getDecValue(getBit(41,Sc[getBlockIdx(41)]))) + (short) 1),&S[getBlockIdx(42)]); setBit(43,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)])) - (getDecValue(getBit(44,Sc[getBlockIdx(44)]))) - (getDecValue(getBit(60,Sc[getBlockIdx(60)]))) + getDecValue(getBit(88,Sc[getBlockIdx(88)])) + (short) 1),&S[getBlockIdx(43)]); setBit(44,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(21,Sc[getBlockIdx(21)])) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(45,Sc[getBlockIdx(45)]))) - (getDecValue(getBit(46,Sc[getBlockIdx(46)])))),&S[getBlockIdx(44)]); setBit(45,SIGN( - (getDecValue(getBit(34,Sc[getBlockIdx(34)]))) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) - (getDecValue(getBit(44,Sc[getBlockIdx(44)]))) + getDecValue(getBit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(45)]); setBit(46,SIGN( - (getDecValue(getBit(22,Sc[getBlockIdx(22)]))) - (getDecValue(getBit(24,Sc[getBlockIdx(24)]))) - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) - (getDecValue(getBit(34,Sc[getBlockIdx(34)]))) + (short) 1),&S[getBlockIdx(46)]); setBit(47,SIGN( - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(46,Sc[getBlockIdx(46)]))) + (short) 1),&S[getBlockIdx(47)]); setBit(48,SIGN( - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) - (getDecValue(getBit(53,Sc[getBlockIdx(53)]))) - (getDecValue(getBit(54,Sc[getBlockIdx(54)]))) - (getDecValue(getBit(60,Sc[getBlockIdx(60)]))) + (short)2),&S[getBlockIdx(48)]); setBit(49,SIGN( - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + (short) 1),&S[getBlockIdx(49)]); setBit(50,SIGN( + getDecValue(getBit(16,Sc[getBlockIdx(16)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + (short)(-3)),&S[getBlockIdx(50)]); setBit(51,SIGN( + getDecValue(getBit(51,Sc[getBlockIdx(51)])) - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) - (getDecValue(getBit(56,Sc[getBlockIdx(56)]))) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)]))) + getDecValue(getBit(80,Sc[getBlockIdx(80)])) + getDecValue(getBit(81,Sc[getBlockIdx(81)]))),&S[getBlockIdx(51)]); setBit(52,SIGN( - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(55,Sc[getBlockIdx(55)]))) - (getDecValue(getBit(56,Sc[getBlockIdx(56)]))) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)]))) + (short) 1),&S[getBlockIdx(52)]); setBit(53,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) - (getDecValue(getBit(29,Sc[getBlockIdx(29)]))) - (getDecValue(getBit(29,Sc[getBlockIdx(29)]))) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)]))) - (getDecValue(getBit(64,Sc[getBlockIdx(64)]))) - (getDecValue(getBit(69,Sc[getBlockIdx(69)]))) + getDecValue(getBit(70,Sc[getBlockIdx(70)])) + getDecValue(getBit(84,Sc[getBlockIdx(84)])) + getDecValue(getBit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(53)]); setBit(54,SIGN( - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - 
(getDecValue(getBit(59,Sc[getBlockIdx(59)])))),&S[getBlockIdx(54)]); setBit(55,SIGN( - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) + getDecValue(getBit(56,Sc[getBlockIdx(56)])) + (short) 1),&S[getBlockIdx(55)]); setBit(56,SIGN( + getDecValue(getBit(52,Sc[getBlockIdx(52)])) - (getDecValue(getBit(55,Sc[getBlockIdx(55)])))),&S[getBlockIdx(56)]); setBit(57,SIGN( + getDecValue(getBit(51,Sc[getBlockIdx(51)])) + getDecValue(getBit(52,Sc[getBlockIdx(52)])) - (getDecValue(getBit(55,Sc[getBlockIdx(55)]))) + getDecValue(getBit(56,Sc[getBlockIdx(56)])) + getDecValue(getBit(57,Sc[getBlockIdx(57)]))),&S[getBlockIdx(57)]); setBit(58,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) - (getDecValue(getBit(38,Sc[getBlockIdx(38)]))) - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) - (getDecValue(getBit(53,Sc[getBlockIdx(53)]))) + getDecValue(getBit(62,Sc[getBlockIdx(62)])) + (short) 1),&S[getBlockIdx(58)]); setBit(59,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) - (getDecValue(getBit(38,Sc[getBlockIdx(38)]))) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + getDecValue(getBit(62,Sc[getBlockIdx(62)])) - (getDecValue(getBit(78,Sc[getBlockIdx(78)]))) + (short) 1),&S[getBlockIdx(59)]); setBit(60,SIGN( + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) - (getDecValue(getBit(50,Sc[getBlockIdx(50)]))) - (getDecValue(getBit(87,Sc[getBlockIdx(87)]))) + (short) 1),&S[getBlockIdx(60)]); setBit(61,SIGN( + getDecValue(getBit(3,Sc[getBlockIdx(3)])) + getDecValue(getBit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(61)]); setBit(62,SIGN( + getDecValue(getBit(61,Sc[getBlockIdx(61)])) + getDecValue(getBit(85,Sc[getBlockIdx(85)])) + (short)(-1)),&S[getBlockIdx(62)]); setBit(63,SIGN( + getDecValue(getBit(61,Sc[getBlockIdx(61)]))),&S[getBlockIdx(63)]); setBit(64,SIGN( + getDecValue(getBit(62,Sc[getBlockIdx(62)])) + getDecValue(getBit(85,Sc[getBlockIdx(85)]))),&S[getBlockIdx(64)]); setBit(65,SIGN( + getDecValue(getBit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(65)]); setBit(66,SIGN( + getDecValue(getBit(65,Sc[getBlockIdx(65)]))),&S[getBlockIdx(66)]); setBit(67,SIGN( + getDecValue(getBit(66,Sc[getBlockIdx(66)]))),&S[getBlockIdx(67)]); setBit(68,SIGN( + getDecValue(getBit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(68)]); setBit(69,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + (short)2),&S[getBlockIdx(69)]); setBit(70,SIGN( + getDecValue(getBit(23,Sc[getBlockIdx(23)])) + getDecValue(getBit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(70)]); setBit(71,SIGN( - (getDecValue(getBit(35,Sc[getBlockIdx(35)]))) - (getDecValue(getBit(83,Sc[getBlockIdx(83)])))),&S[getBlockIdx(71)]); setBit(72,SIGN( - (getDecValue(getBit(1,Sc[getBlockIdx(1)]))) + getDecValue(getBit(11,Sc[getBlockIdx(11)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(87,Sc[getBlockIdx(87)])) + (short) 1),&S[getBlockIdx(72)]); setBit(73,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) - (getDecValue(getBit(44,Sc[getBlockIdx(44)]))) + getDecValue(getBit(45,Sc[getBlockIdx(45)])) - (getDecValue(getBit(47,Sc[getBlockIdx(47)]))) + getDecValue(getBit(66,Sc[getBlockIdx(66)])) + getDecValue(getBit(67,Sc[getBlockIdx(67)]))),&S[getBlockIdx(73)]); setBit(74,SIGN( + getDecValue(getBit(73,Sc[getBlockIdx(73)]))),&S[getBlockIdx(74)]); setBit(75,SIGN( + 
getDecValue(getBit(66,Sc[getBlockIdx(66)])) + getDecValue(getBit(74,Sc[getBlockIdx(74)]))),&S[getBlockIdx(75)]); setBit(76,SIGN( - (getDecValue(getBit(21,Sc[getBlockIdx(21)]))) - (getDecValue(getBit(32,Sc[getBlockIdx(32)]))) - (getDecValue(getBit(95,Sc[getBlockIdx(95)]))) + (short)3),&S[getBlockIdx(76)]); setBit(77,SIGN( + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + (short)(-1)),&S[getBlockIdx(77)]); setBit(78,SIGN( + getDecValue(getBit(12,Sc[getBlockIdx(12)])) + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(62,Sc[getBlockIdx(62)]))) - (getDecValue(getBit(91,Sc[getBlockIdx(91)]))) + (short)(-4)),&S[getBlockIdx(78)]); setBit(79,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(79)]); setBit(80,SIGN( + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + getDecValue(getBit(54,Sc[getBlockIdx(54)])) + (short)(-1)),&S[getBlockIdx(80)]); setBit(81,SIGN( + getDecValue(getBit(55,Sc[getBlockIdx(55)])) + getDecValue(getBit(57,Sc[getBlockIdx(57)])) + (short)(-1)),&S[getBlockIdx(81)]); setBit(82,SIGN( + getDecValue(getBit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(82)]); setBit(83,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + getDecValue(getBit(59,Sc[getBlockIdx(59)]))),&S[getBlockIdx(83)]); setBit(84,SIGN( + getDecValue(getBit(31,Sc[getBlockIdx(31)])) - (getDecValue(getBit(82,Sc[getBlockIdx(82)])))),&S[getBlockIdx(84)]); setBit(85,SIGN( - (getDecValue(getBit(38,Sc[getBlockIdx(38)]))) + (short) 1),&S[getBlockIdx(85)]); setBit(86,SIGN( + getDecValue(getBit(19,Sc[getBlockIdx(19)])) + getDecValue(getBit(33,Sc[getBlockIdx(33)]))),&S[getBlockIdx(86)]); setBit(87,SIGN( + getDecValue(getBit(9,Sc[getBlockIdx(9)]))),&S[getBlockIdx(87)]); setBit(88,SIGN( + getDecValue(getBit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(88)]); setBit(89,SIGN( + getDecValue(getBit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(89)]); setBit(90,SIGN( + getDecValue(getBit(24,Sc[getBlockIdx(24)])) + getDecValue(getBit(86,Sc[getBlockIdx(86)]))),&S[getBlockIdx(90)]); setBit(91,SIGN( - (getDecValue(getBit(90,Sc[getBlockIdx(90)]))) + (short) 1),&S[getBlockIdx(91)]); setBit(92,SIGN( + getDecValue(getBit(7,Sc[getBlockIdx(7)])) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + (short)(-1)),&S[getBlockIdx(92)]); setBit(93,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + (short)(-1)),&S[getBlockIdx(93)]); setBit(94,SIGN( + getDecValue(getBit(93,Sc[getBlockIdx(93)]))),&S[getBlockIdx(94)]); setBit(95,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) - (getDecValue(getBit(29,Sc[getBlockIdx(29)]))) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + getDecValue(getBit(61,Sc[getBlockIdx(61)])) + (short)(-1)),&S[getBlockIdx(95)]); } __global__ void findAttractor(uint32 *attractors, uint32 *transients, uint32 *periods, uint32_t numThreads){ int transient = 0, period = 0; uint32 S0[N], S1[N]; uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; uint32_t step = NUM_STATES / NUM_COPYS; uint32_t rest = NUM_STATES % NUM_COPYS; uint32_t begin = 0; uint32_t end = step - 1; hiprandState_t state; hiprand_init(thread, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is 
only important with multiple cores */ 1, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); bool flag = true; if(thread < numThreads){ if(rest > 0){ end = end + 1; rest = rest - 1; }else{ flag = false; } for(uint32_t i = 0; i < NUM_COPYS;i++){ if(i == thread) break; if(rest > 0){ end = end + 1; begin = begin + 1; rest = rest - 1; }else if(rest == 0 && flag){ begin = begin + 1; flag = 0; } begin += step; end += step; } for (uint32 i = begin; i <= end; ++i) { S0[0] = S1[0] = hiprand(&state); S0[1] = S1[1] = hiprand(&state); S0[2] = S1[2] = hiprand(&state); setBit(0, 1, &S0[getBlockIdx(0)]); // garante as entradas fixas setBit(1, 1, &S0[getBlockIdx(1)]); setBit(2, 0, &S0[getBlockIdx(2)]); setBit(3, 0, &S0[getBlockIdx(3)]); setBit(4, 0, &S0[getBlockIdx(4)]); setBit(5, 0, &S0[getBlockIdx(5)]); setBit(6, 1, &S0[getBlockIdx(6)]); setBit(7, 1, &S0[getBlockIdx(7)]); setBit(0, 1, &S1[getBlockIdx(0)]); setBit(1, 1, &S1[getBlockIdx(1)]); setBit(2, 0, &S1[getBlockIdx(2)]); setBit(3, 0, &S1[getBlockIdx(3)]); setBit(4, 0, &S1[getBlockIdx(4)]); setBit(5, 0, &S1[getBlockIdx(5)]); setBit(6, 1, &S1[getBlockIdx(6)]); setBit(7, 1, &S1[getBlockIdx(7)]); transient = 0; period = 0; do{ pass(S0); pass(S0); pass(S1); transient++; }while(!comp(S0,S1)); do{ pass(S0); period++; }while (!comp(S0,S1)); period--; //printf("%u %u, %u , %u, Trans %u, Per %u\n",i,S0[0], S0[1], S0[2],transient,period); transients[i] = transient; periods[i]= period; for(int s = 0; s < N; s++){ attractors[(i * N) + s] = S0[s]; } //cout << transient << " " << period << std::endl; } } } int main() { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size size_t numState = NUM_STATES; size_t size = N * numState * sizeof(uint32); size_t size_transients = numState*sizeof(uint32); size_t size_periods = numState*sizeof(uint32); size_t totalBytes = size+size_transients+size_periods; size_t kb = totalBytes/(1024); size_t mb = kb/(1024); size_t gb = mb/(1024); printf("Find attractors net %d nodes in %lu initials states.\n", N,numState); printf("Memory usage: %lu Gb or %lu Mb or %lu Kb.\n", gb, mb, kb); uint32 *h_transients = (uint32*)malloc(size_transients); // Verifica se houve sucesso na alocao do vetor h_transients if (h_transients == NULL){ fprintf(stderr, "Failed to allocate h_transients!\n"); exit(EXIT_FAILURE); } uint32 *h_periods = (uint32*)malloc(size_periods); // Verifica se houve sucesso na alocao do vetor h_periods if (h_periods == NULL){ fprintf(stderr, "Failed to allocate h_periods!\n"); exit(EXIT_FAILURE); } //Aloca o vetor para a saida no host uint32 *h_attractors = (uint32*)malloc(size); // Verifica se houve sucesso na alocao do vetor h_attractors if (h_attractors == NULL){ fprintf(stderr, "Failed to allocate h_attractors!\n"); exit(EXIT_FAILURE); } // Aloca os vetores na GPU (device) uint32 *d_transients = NULL; err = hipMalloc((void **)&d_transients, NUM_STATES * sizeof(uint32)); if (err != hipSuccess){ fprintf(stderr, "Failed to allocate d_transients (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } uint32 *d_periods = NULL; err = hipMalloc((void **)&d_periods, NUM_STATES * sizeof(uint32)); if (err != hipSuccess){ fprintf(stderr, "Failed to allocate d_periods (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } uint32 *d_attractors = NULL; err = hipMalloc((void **)&d_attractors, size); if (err != hipSuccess){ fprintf(stderr, "Failed to allocate d_attractors (error code 
%s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel uint32_t threadsPerBlock = 256; uint32_t blocksPerGrid = (NUM_COPYS + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( findAttractor), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_attractors, d_transients, d_periods, NUM_COPYS); //findAttractor<<< 1,1 >>>(d_attractors, d_transients, d_periods, NUM_COPYS); err = hipGetLastError(); if (err != hipSuccess){ fprintf(stderr, "Failed to launch findAttractor kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_transients, d_transients, size_transients, hipMemcpyDeviceToHost); if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector d_transients from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(h_periods, d_periods, size_periods, hipMemcpyDeviceToHost); if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector d_periods from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(h_attractors, d_attractors, size, hipMemcpyDeviceToHost); if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector d_attractors from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } ///* // printf("Attractor found:\n"); // // // for(int i = 0; i < numState; i++){ // for(int j = 0; j < N; j++){ // printf("%u ", h_attractors[(i * N) + j]); // } // printf(" Trans:%u Per:%u\n", h_transients[i],h_periods[i]); // } // printf("\n"); //*/ err = hipFree(d_transients); if (err != hipSuccess){ fprintf(stderr, "Failed to free device vector d_transients (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_periods); if (err != hipSuccess){ fprintf(stderr, "Failed to free device vector d_periods (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_attractors); if (err != hipSuccess){ fprintf(stderr, "Failed to free device vector d_attractors (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_transients); free(h_periods); free(h_attractors); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess){ fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
8df892bbdbb5ff0a5d22223ccb73bea8c7e3a26d.cu
#include <iostream> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> #include <stdio.h> #include <list> #define SIGN(x) (x >= 1) ? (short)1 : (short)0 #define NUM_STATES (1<<18)//23 maior que 17 da erro no vetor h_transients #define NUM_COPYS (1<<20)//22 10 #define NUM_NOS 96 #define N 3 using namespace std; //typedef unsigned long uint64; typedef unsigned int uint32; __device__ bool comp(uint32 *S0, uint32 *S1){ for (int i = 0; i < N; ++i) { if(S1[i] != S0[i]) return false; } return true; } __device__ short getDecValue(short v){ return v? v:(short)-1; } __device__ short getBit(int idx,uint32 v){ idx = idx % 32; return (short)((v >> idx) & 1); } __device__ void setBit(int idx, short newV, uint32 *v){ idx = idx % 32; *v &= ~(1 << idx); *v |= ((uint32)newV << idx); } __device__ short getBlockIdx(int idx){ if(idx<32) return 0; else if(idx>63) return 2; else return 1; } __device__ void pass(uint32 *S) { uint32 Sc[N]; for (int i = 0; i < N; ++i) { Sc[i] = S[i]; } setBit(0,1,&S[getBlockIdx(0)]); setBit(1,1,&S[getBlockIdx(1)]); setBit(2,0,&S[getBlockIdx(2)]); setBit(3,0,&S[getBlockIdx(3)]); setBit(4,0,&S[getBlockIdx(4)]); setBit(5,0,&S[getBlockIdx(5)]); setBit(6,1,&S[getBlockIdx(6)]); setBit(7,1,&S[getBlockIdx(7)]); setBit(8,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(8)]); setBit(9,SIGN( + getDecValue(getBit(0,Sc[getBlockIdx(0)])) + getDecValue(getBit(71,Sc[getBlockIdx(71)]))),&S[getBlockIdx(9)]); setBit(10,SIGN( + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + getDecValue(getBit(60,Sc[getBlockIdx(60)])) + (short)(-1)),&S[getBlockIdx(10)]); setBit(11,SIGN( - (getDecValue(getBit(2,Sc[getBlockIdx(2)]))) + (short) 1),&S[getBlockIdx(11)]); setBit(12,SIGN( - (getDecValue(getBit(13,Sc[getBlockIdx(13)]))) + (short) 1),&S[getBlockIdx(12)]); setBit(13,SIGN( + getDecValue(getBit(14,Sc[getBlockIdx(14)])) + getDecValue(getBit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(13)]); setBit(14,SIGN( + getDecValue(getBit(1,Sc[getBlockIdx(1)]))),&S[getBlockIdx(14)]); setBit(15,SIGN( + getDecValue(getBit(2,Sc[getBlockIdx(2)])) - (getDecValue(getBit(4,Sc[getBlockIdx(4)])))),&S[getBlockIdx(15)]); setBit(16,SIGN( - (getDecValue(getBit(12,Sc[getBlockIdx(12)]))) + getDecValue(getBit(14,Sc[getBlockIdx(14)])) + (short) 1),&S[getBlockIdx(16)]); setBit(17,SIGN( + getDecValue(getBit(16,Sc[getBlockIdx(16)])) + getDecValue(getBit(78,Sc[getBlockIdx(78)]))),&S[getBlockIdx(17)]); setBit(18,SIGN( + getDecValue(getBit(17,Sc[getBlockIdx(17)])) - (getDecValue(getBit(7,Sc[getBlockIdx(7)]))) - (getDecValue(getBit(92,Sc[getBlockIdx(92)]))) + (short) 1),&S[getBlockIdx(18)]); setBit(19,SIGN( + getDecValue(getBit(18,Sc[getBlockIdx(18)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(19)]); setBit(20,SIGN( + getDecValue(getBit(13,Sc[getBlockIdx(13)])) + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(33,Sc[getBlockIdx(33)])) - (getDecValue(getBit(37,Sc[getBlockIdx(37)]))) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + getDecValue(getBit(82,Sc[getBlockIdx(82)]))),&S[getBlockIdx(20)]); setBit(21,SIGN( + getDecValue(getBit(18,Sc[getBlockIdx(18)])) + getDecValue(getBit(20,Sc[getBlockIdx(20)])) + getDecValue(getBit(20,Sc[getBlockIdx(20)])) - (getDecValue(getBit(76,Sc[getBlockIdx(76)]))) + getDecValue(getBit(95,Sc[getBlockIdx(95)])) + (short)(-1)),&S[getBlockIdx(21)]); setBit(22,SIGN( + getDecValue(getBit(13,Sc[getBlockIdx(13)])) + getDecValue(getBit(16,Sc[getBlockIdx(16)]))),&S[getBlockIdx(22)]); setBit(23,SIGN( + 
getDecValue(getBit(22,Sc[getBlockIdx(22)]))),&S[getBlockIdx(23)]); setBit(24,SIGN( + getDecValue(getBit(19,Sc[getBlockIdx(19)])) + getDecValue(getBit(23,Sc[getBlockIdx(23)]))),&S[getBlockIdx(24)]); setBit(25,SIGN( + getDecValue(getBit(18,Sc[getBlockIdx(18)])) + getDecValue(getBit(19,Sc[getBlockIdx(19)])) + (short)(-1)),&S[getBlockIdx(25)]); setBit(26,SIGN( - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + getDecValue(getBit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(26)]); setBit(27,SIGN( + getDecValue(getBit(26,Sc[getBlockIdx(26)]))),&S[getBlockIdx(27)]); setBit(28,SIGN( + getDecValue(getBit(7,Sc[getBlockIdx(7)])) + (short) 1),&S[getBlockIdx(28)]); setBit(29,SIGN( - (getDecValue(getBit(24,Sc[getBlockIdx(24)]))) - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) - (getDecValue(getBit(27,Sc[getBlockIdx(27)]))) - (getDecValue(getBit(33,Sc[getBlockIdx(33)]))) + (short)3),&S[getBlockIdx(29)]); setBit(30,SIGN( + getDecValue(getBit(28,Sc[getBlockIdx(28)])) + getDecValue(getBit(29,Sc[getBlockIdx(29)])) + (short)(-1)),&S[getBlockIdx(30)]); setBit(31,SIGN( - (getDecValue(getBit(30,Sc[getBlockIdx(30)]))) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + (short) 1),&S[getBlockIdx(31)]); setBit(32,SIGN( - (getDecValue(getBit(10,Sc[getBlockIdx(10)]))) + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(84,Sc[getBlockIdx(84)]))),&S[getBlockIdx(32)]); setBit(33,SIGN( + getDecValue(getBit(15,Sc[getBlockIdx(15)])) + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(42,Sc[getBlockIdx(42)])) - (getDecValue(getBit(72,Sc[getBlockIdx(72)]))) + (short)(-1)),&S[getBlockIdx(33)]); setBit(34,SIGN( + getDecValue(getBit(4,Sc[getBlockIdx(4)])) + getDecValue(getBit(33,Sc[getBlockIdx(33)])) - (getDecValue(getBit(36,Sc[getBlockIdx(36)]))) - (getDecValue(getBit(36,Sc[getBlockIdx(36)]))) - (getDecValue(getBit(37,Sc[getBlockIdx(37)]))) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(69,Sc[getBlockIdx(69)]))) + (short)2),&S[getBlockIdx(34)]); setBit(35,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(35)]); setBit(36,SIGN( - (getDecValue(getBit(4,Sc[getBlockIdx(4)]))) - (getDecValue(getBit(71,Sc[getBlockIdx(71)]))) + (short) 1),&S[getBlockIdx(36)]); setBit(37,SIGN( - (getDecValue(getBit(4,Sc[getBlockIdx(4)]))) + getDecValue(getBit(71,Sc[getBlockIdx(71)])) + (short) 1),&S[getBlockIdx(37)]); setBit(38,SIGN( - (getDecValue(getBit(8,Sc[getBlockIdx(8)]))) + getDecValue(getBit(39,Sc[getBlockIdx(39)])) + getDecValue(getBit(6,Sc[getBlockIdx(6)])) - (getDecValue(getBit(40,Sc[getBlockIdx(40)]))) - (getDecValue(getBit(63,Sc[getBlockIdx(63)]))) + (short)(-1)),&S[getBlockIdx(38)]); setBit(39,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(23,Sc[getBlockIdx(23)])) - (getDecValue(getBit(34,Sc[getBlockIdx(34)]))) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + getDecValue(getBit(70,Sc[getBlockIdx(70)])) + getDecValue(getBit(84,Sc[getBlockIdx(84)])) + getDecValue(getBit(5,Sc[getBlockIdx(5)])) + (short)(-1)),&S[getBlockIdx(39)]); setBit(40,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)]))),&S[getBlockIdx(40)]); setBit(41,SIGN( - (getDecValue(getBit(22,Sc[getBlockIdx(22)]))) - (getDecValue(getBit(23,Sc[getBlockIdx(23)]))) - (getDecValue(getBit(24,Sc[getBlockIdx(24)]))) - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + getDecValue(getBit(72,Sc[getBlockIdx(72)])) + (short) 1),&S[getBlockIdx(41)]); setBit(42,SIGN( - 
(getDecValue(getBit(41,Sc[getBlockIdx(41)]))) + (short) 1),&S[getBlockIdx(42)]); setBit(43,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)])) - (getDecValue(getBit(44,Sc[getBlockIdx(44)]))) - (getDecValue(getBit(60,Sc[getBlockIdx(60)]))) + getDecValue(getBit(88,Sc[getBlockIdx(88)])) + (short) 1),&S[getBlockIdx(43)]); setBit(44,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(21,Sc[getBlockIdx(21)])) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(45,Sc[getBlockIdx(45)]))) - (getDecValue(getBit(46,Sc[getBlockIdx(46)])))),&S[getBlockIdx(44)]); setBit(45,SIGN( - (getDecValue(getBit(34,Sc[getBlockIdx(34)]))) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) - (getDecValue(getBit(44,Sc[getBlockIdx(44)]))) + getDecValue(getBit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(45)]); setBit(46,SIGN( - (getDecValue(getBit(22,Sc[getBlockIdx(22)]))) - (getDecValue(getBit(24,Sc[getBlockIdx(24)]))) - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) - (getDecValue(getBit(34,Sc[getBlockIdx(34)]))) + (short) 1),&S[getBlockIdx(46)]); setBit(47,SIGN( - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(46,Sc[getBlockIdx(46)]))) + (short) 1),&S[getBlockIdx(47)]); setBit(48,SIGN( - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) - (getDecValue(getBit(53,Sc[getBlockIdx(53)]))) - (getDecValue(getBit(54,Sc[getBlockIdx(54)]))) - (getDecValue(getBit(60,Sc[getBlockIdx(60)]))) + (short)2),&S[getBlockIdx(48)]); setBit(49,SIGN( - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + (short) 1),&S[getBlockIdx(49)]); setBit(50,SIGN( + getDecValue(getBit(16,Sc[getBlockIdx(16)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + (short)(-3)),&S[getBlockIdx(50)]); setBit(51,SIGN( + getDecValue(getBit(51,Sc[getBlockIdx(51)])) - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) - (getDecValue(getBit(56,Sc[getBlockIdx(56)]))) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)]))) + getDecValue(getBit(80,Sc[getBlockIdx(80)])) + getDecValue(getBit(81,Sc[getBlockIdx(81)]))),&S[getBlockIdx(51)]); setBit(52,SIGN( - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(55,Sc[getBlockIdx(55)]))) - (getDecValue(getBit(56,Sc[getBlockIdx(56)]))) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)]))) + (short) 1),&S[getBlockIdx(52)]); setBit(53,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) - (getDecValue(getBit(29,Sc[getBlockIdx(29)]))) - (getDecValue(getBit(29,Sc[getBlockIdx(29)]))) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)]))) - (getDecValue(getBit(64,Sc[getBlockIdx(64)]))) - (getDecValue(getBit(69,Sc[getBlockIdx(69)]))) + getDecValue(getBit(70,Sc[getBlockIdx(70)])) + getDecValue(getBit(84,Sc[getBlockIdx(84)])) + getDecValue(getBit(5,Sc[getBlockIdx(5)]))),&S[getBlockIdx(53)]); setBit(54,SIGN( - (getDecValue(getBit(48,Sc[getBlockIdx(48)]))) + getDecValue(getBit(49,Sc[getBlockIdx(49)])) - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(58,Sc[getBlockIdx(58)]))) - (getDecValue(getBit(59,Sc[getBlockIdx(59)])))),&S[getBlockIdx(54)]); setBit(55,SIGN( - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - 
(getDecValue(getBit(52,Sc[getBlockIdx(52)]))) + getDecValue(getBit(56,Sc[getBlockIdx(56)])) + (short) 1),&S[getBlockIdx(55)]); setBit(56,SIGN( + getDecValue(getBit(52,Sc[getBlockIdx(52)])) - (getDecValue(getBit(55,Sc[getBlockIdx(55)])))),&S[getBlockIdx(56)]); setBit(57,SIGN( + getDecValue(getBit(51,Sc[getBlockIdx(51)])) + getDecValue(getBit(52,Sc[getBlockIdx(52)])) - (getDecValue(getBit(55,Sc[getBlockIdx(55)]))) + getDecValue(getBit(56,Sc[getBlockIdx(56)])) + getDecValue(getBit(57,Sc[getBlockIdx(57)]))),&S[getBlockIdx(57)]); setBit(58,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) - (getDecValue(getBit(38,Sc[getBlockIdx(38)]))) - (getDecValue(getBit(51,Sc[getBlockIdx(51)]))) - (getDecValue(getBit(52,Sc[getBlockIdx(52)]))) - (getDecValue(getBit(53,Sc[getBlockIdx(53)]))) + getDecValue(getBit(62,Sc[getBlockIdx(62)])) + (short) 1),&S[getBlockIdx(58)]); setBit(59,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) - (getDecValue(getBit(38,Sc[getBlockIdx(38)]))) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + getDecValue(getBit(62,Sc[getBlockIdx(62)])) - (getDecValue(getBit(78,Sc[getBlockIdx(78)]))) + (short) 1),&S[getBlockIdx(59)]); setBit(60,SIGN( + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) - (getDecValue(getBit(50,Sc[getBlockIdx(50)]))) - (getDecValue(getBit(87,Sc[getBlockIdx(87)]))) + (short) 1),&S[getBlockIdx(60)]); setBit(61,SIGN( + getDecValue(getBit(3,Sc[getBlockIdx(3)])) + getDecValue(getBit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(61)]); setBit(62,SIGN( + getDecValue(getBit(61,Sc[getBlockIdx(61)])) + getDecValue(getBit(85,Sc[getBlockIdx(85)])) + (short)(-1)),&S[getBlockIdx(62)]); setBit(63,SIGN( + getDecValue(getBit(61,Sc[getBlockIdx(61)]))),&S[getBlockIdx(63)]); setBit(64,SIGN( + getDecValue(getBit(62,Sc[getBlockIdx(62)])) + getDecValue(getBit(85,Sc[getBlockIdx(85)]))),&S[getBlockIdx(64)]); setBit(65,SIGN( + getDecValue(getBit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(65)]); setBit(66,SIGN( + getDecValue(getBit(65,Sc[getBlockIdx(65)]))),&S[getBlockIdx(66)]); setBit(67,SIGN( + getDecValue(getBit(66,Sc[getBlockIdx(66)]))),&S[getBlockIdx(67)]); setBit(68,SIGN( + getDecValue(getBit(8,Sc[getBlockIdx(8)]))),&S[getBlockIdx(68)]); setBit(69,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + (short)2),&S[getBlockIdx(69)]); setBit(70,SIGN( + getDecValue(getBit(23,Sc[getBlockIdx(23)])) + getDecValue(getBit(68,Sc[getBlockIdx(68)]))),&S[getBlockIdx(70)]); setBit(71,SIGN( - (getDecValue(getBit(35,Sc[getBlockIdx(35)]))) - (getDecValue(getBit(83,Sc[getBlockIdx(83)])))),&S[getBlockIdx(71)]); setBit(72,SIGN( - (getDecValue(getBit(1,Sc[getBlockIdx(1)]))) + getDecValue(getBit(11,Sc[getBlockIdx(11)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(87,Sc[getBlockIdx(87)])) + (short) 1),&S[getBlockIdx(72)]); setBit(73,SIGN( - (getDecValue(getBit(25,Sc[getBlockIdx(25)]))) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) - (getDecValue(getBit(44,Sc[getBlockIdx(44)]))) + getDecValue(getBit(45,Sc[getBlockIdx(45)])) - (getDecValue(getBit(47,Sc[getBlockIdx(47)]))) + getDecValue(getBit(66,Sc[getBlockIdx(66)])) + getDecValue(getBit(67,Sc[getBlockIdx(67)]))),&S[getBlockIdx(73)]); setBit(74,SIGN( + getDecValue(getBit(73,Sc[getBlockIdx(73)]))),&S[getBlockIdx(74)]); setBit(75,SIGN( + getDecValue(getBit(66,Sc[getBlockIdx(66)])) + getDecValue(getBit(74,Sc[getBlockIdx(74)]))),&S[getBlockIdx(75)]); setBit(76,SIGN( - 
(getDecValue(getBit(21,Sc[getBlockIdx(21)]))) - (getDecValue(getBit(32,Sc[getBlockIdx(32)]))) - (getDecValue(getBit(95,Sc[getBlockIdx(95)]))) + (short)3),&S[getBlockIdx(76)]); setBit(77,SIGN( + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + (short)(-1)),&S[getBlockIdx(77)]); setBit(78,SIGN( + getDecValue(getBit(12,Sc[getBlockIdx(12)])) + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(25,Sc[getBlockIdx(25)])) + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) - (getDecValue(getBit(62,Sc[getBlockIdx(62)]))) - (getDecValue(getBit(91,Sc[getBlockIdx(91)]))) + (short)(-4)),&S[getBlockIdx(78)]); setBit(79,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)]))),&S[getBlockIdx(79)]); setBit(80,SIGN( + getDecValue(getBit(49,Sc[getBlockIdx(49)])) + getDecValue(getBit(54,Sc[getBlockIdx(54)])) + (short)(-1)),&S[getBlockIdx(80)]); setBit(81,SIGN( + getDecValue(getBit(55,Sc[getBlockIdx(55)])) + getDecValue(getBit(57,Sc[getBlockIdx(57)])) + (short)(-1)),&S[getBlockIdx(81)]); setBit(82,SIGN( + getDecValue(getBit(3,Sc[getBlockIdx(3)]))),&S[getBlockIdx(82)]); setBit(83,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + getDecValue(getBit(59,Sc[getBlockIdx(59)]))),&S[getBlockIdx(83)]); setBit(84,SIGN( + getDecValue(getBit(31,Sc[getBlockIdx(31)])) - (getDecValue(getBit(82,Sc[getBlockIdx(82)])))),&S[getBlockIdx(84)]); setBit(85,SIGN( - (getDecValue(getBit(38,Sc[getBlockIdx(38)]))) + (short) 1),&S[getBlockIdx(85)]); setBit(86,SIGN( + getDecValue(getBit(19,Sc[getBlockIdx(19)])) + getDecValue(getBit(33,Sc[getBlockIdx(33)]))),&S[getBlockIdx(86)]); setBit(87,SIGN( + getDecValue(getBit(9,Sc[getBlockIdx(9)]))),&S[getBlockIdx(87)]); setBit(88,SIGN( + getDecValue(getBit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(88)]); setBit(89,SIGN( + getDecValue(getBit(87,Sc[getBlockIdx(87)]))),&S[getBlockIdx(89)]); setBit(90,SIGN( + getDecValue(getBit(24,Sc[getBlockIdx(24)])) + getDecValue(getBit(86,Sc[getBlockIdx(86)]))),&S[getBlockIdx(90)]); setBit(91,SIGN( - (getDecValue(getBit(90,Sc[getBlockIdx(90)]))) + (short) 1),&S[getBlockIdx(91)]); setBit(92,SIGN( + getDecValue(getBit(7,Sc[getBlockIdx(7)])) + getDecValue(getBit(43,Sc[getBlockIdx(43)])) + (short)(-1)),&S[getBlockIdx(92)]); setBit(93,SIGN( + getDecValue(getBit(34,Sc[getBlockIdx(34)])) + getDecValue(getBit(38,Sc[getBlockIdx(38)])) + (short)(-1)),&S[getBlockIdx(93)]); setBit(94,SIGN( + getDecValue(getBit(93,Sc[getBlockIdx(93)]))),&S[getBlockIdx(94)]); setBit(95,SIGN( + getDecValue(getBit(21,Sc[getBlockIdx(21)])) - (getDecValue(getBit(29,Sc[getBlockIdx(29)]))) - (getDecValue(getBit(43,Sc[getBlockIdx(43)]))) + getDecValue(getBit(61,Sc[getBlockIdx(61)])) + (short)(-1)),&S[getBlockIdx(95)]); } __global__ void findAttractor(uint32 *attractors, uint32 *transients, uint32 *periods, uint32_t numThreads){ int transient = 0, period = 0; uint32 S0[N], S1[N]; uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; uint32_t step = NUM_STATES / NUM_COPYS; uint32_t rest = NUM_STATES % NUM_COPYS; uint32_t begin = 0; uint32_t end = step - 1; curandState_t state; curand_init(thread, /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 1, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ 
&state); bool flag = true; if(thread < numThreads){ if(rest > 0){ end = end + 1; rest = rest - 1; }else{ flag = false; } for(uint32_t i = 0; i < NUM_COPYS;i++){ if(i == thread) break; if(rest > 0){ end = end + 1; begin = begin + 1; rest = rest - 1; }else if(rest == 0 && flag){ begin = begin + 1; flag = 0; } begin += step; end += step; } for (uint32 i = begin; i <= end; ++i) { S0[0] = S1[0] = curand(&state); S0[1] = S1[1] = curand(&state); S0[2] = S1[2] = curand(&state); setBit(0, 1, &S0[getBlockIdx(0)]); // garante as entradas fixas setBit(1, 1, &S0[getBlockIdx(1)]); setBit(2, 0, &S0[getBlockIdx(2)]); setBit(3, 0, &S0[getBlockIdx(3)]); setBit(4, 0, &S0[getBlockIdx(4)]); setBit(5, 0, &S0[getBlockIdx(5)]); setBit(6, 1, &S0[getBlockIdx(6)]); setBit(7, 1, &S0[getBlockIdx(7)]); setBit(0, 1, &S1[getBlockIdx(0)]); setBit(1, 1, &S1[getBlockIdx(1)]); setBit(2, 0, &S1[getBlockIdx(2)]); setBit(3, 0, &S1[getBlockIdx(3)]); setBit(4, 0, &S1[getBlockIdx(4)]); setBit(5, 0, &S1[getBlockIdx(5)]); setBit(6, 1, &S1[getBlockIdx(6)]); setBit(7, 1, &S1[getBlockIdx(7)]); transient = 0; period = 0; do{ pass(S0); pass(S0); pass(S1); transient++; }while(!comp(S0,S1)); do{ pass(S0); period++; }while (!comp(S0,S1)); period--; //printf("%u %u, %u , %u, Trans %u, Per %u\n",i,S0[0], S0[1], S0[2],transient,period); transients[i] = transient; periods[i]= period; for(int s = 0; s < N; s++){ attractors[(i * N) + s] = S0[s]; } //cout << transient << " " << period << std::endl; } } } int main() { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size size_t numState = NUM_STATES; size_t size = N * numState * sizeof(uint32); size_t size_transients = numState*sizeof(uint32); size_t size_periods = numState*sizeof(uint32); size_t totalBytes = size+size_transients+size_periods; size_t kb = totalBytes/(1024); size_t mb = kb/(1024); size_t gb = mb/(1024); printf("Find attractors net %d nodes in %lu initials states.\n", N,numState); printf("Memory usage: %lu Gb or %lu Mb or %lu Kb.\n", gb, mb, kb); uint32 *h_transients = (uint32*)malloc(size_transients); // Verifica se houve sucesso na alocação do vetor h_transients if (h_transients == NULL){ fprintf(stderr, "Failed to allocate h_transients!\n"); exit(EXIT_FAILURE); } uint32 *h_periods = (uint32*)malloc(size_periods); // Verifica se houve sucesso na alocação do vetor h_periods if (h_periods == NULL){ fprintf(stderr, "Failed to allocate h_periods!\n"); exit(EXIT_FAILURE); } //Aloca o vetor para a saida no host uint32 *h_attractors = (uint32*)malloc(size); // Verifica se houve sucesso na alocação do vetor h_attractors if (h_attractors == NULL){ fprintf(stderr, "Failed to allocate h_attractors!\n"); exit(EXIT_FAILURE); } // Aloca os vetores na GPU (device) uint32 *d_transients = NULL; err = cudaMalloc((void **)&d_transients, NUM_STATES * sizeof(uint32)); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate d_transients (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } uint32 *d_periods = NULL; err = cudaMalloc((void **)&d_periods, NUM_STATES * sizeof(uint32)); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate d_periods (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } uint32 *d_attractors = NULL; err = cudaMalloc((void **)&d_attractors, size); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate d_attractors (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel uint32_t threadsPerBlock 
= 256; uint32_t blocksPerGrid = (NUM_COPYS + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); findAttractor<<< blocksPerGrid, threadsPerBlock >>>(d_attractors, d_transients, d_periods, NUM_COPYS); //findAttractor<<< 1,1 >>>(d_attractors, d_transients, d_periods, NUM_COPYS); err = cudaGetLastError(); if (err != cudaSuccess){ fprintf(stderr, "Failed to launch findAttractor kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_transients, d_transients, size_transients, cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector d_transients from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(h_periods, d_periods, size_periods, cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector d_periods from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(h_attractors, d_attractors, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector d_attractors from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } ///* // printf("Attractor found:\n"); // // // for(int i = 0; i < numState; i++){ // for(int j = 0; j < N; j++){ // printf("%u ", h_attractors[(i * N) + j]); // } // printf(" Trans:%u Per:%u\n", h_transients[i],h_periods[i]); // } // printf("\n"); //*/ err = cudaFree(d_transients); if (err != cudaSuccess){ fprintf(stderr, "Failed to free device vector d_transients (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_periods); if (err != cudaSuccess){ fprintf(stderr, "Failed to free device vector d_periods (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_attractors); if (err != cudaSuccess){ fprintf(stderr, "Failed to free device vector d_attractors (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_transients); free(h_periods); free(h_attractors); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess){ fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
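The two do/while loops in findAttractor follow the tortoise-and-hare idea: one state copy is advanced twice per step and the other once until they meet on the attractor, and the cycle length is then measured by walking one copy around until it returns. Below is a minimal host-side sketch of that pattern on a plain integer map, for illustration only; the function name detect_attractor and the map f are assumptions, not part of either file above.

// Illustrative sketch only -- mirrors the structure of the kernel's two
// do/while loops, with an arbitrary integer map f standing in for pass().
static void detect_attractor(int (*f)(int), int x0,
                             int *meeting_steps, int *cycle_len) {
    int slow = x0, fast = x0;
    *meeting_steps = 0;
    do {                        // kernel: pass(S0); pass(S0); pass(S1);
        fast = f(f(fast));
        slow = f(slow);
        ++*meeting_steps;       // the kernel stores this count in transients[]
    } while (slow != fast);

    *cycle_len = 0;
    do {                        // kernel: pass(S0); period++;
        fast = f(fast);
        ++*cycle_len;
    } while (fast != slow);     // length of the attractor cycle
}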
732b1073764d3807ac4a30e258da6322a780ab6c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdint.h> #include <cstdint> #include <numeric> #include <hip/hip_cooperative_groups.h> namespace cg = cooperative_groups; //********************** CUDA_ERROR inline void HandleError(hipError_t err, const char *file, int line) { //Error handling micro, wrap it around function whenever possible if (err != hipSuccess) { printf("\n%s in %s at line %d\n", hipGetErrorString(err), file, line); #ifdef _WIN32 system("pause"); #else exit(EXIT_FAILURE); #endif } } #define CUDA_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //****************************************************************************** //********************** testing cg kernel __global__ void testing_cg_grid_sync(const uint32_t num_elements, uint32_t *d_arr){ uint32_t tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_elements){ uint32_t my_element = d_arr[tid]; //to sync across the whole grid cg::grid_group barrier = cg::this_grid(); //to sync within a single block //cg::thread_block barrier = cg::this_thread_block(); //wait for all reads barrier.sync(); uint32_t tar_id = num_elements - tid - 1; d_arr[tar_id] = my_element; } return; } //****************************************************************************** //********************** execute void execute_test(const int sm_count){ //host array //const uint32_t arr_size = 1 << 20; //1M const uint32_t arr_size = 1680*80; uint32_t* h_arr = (uint32_t*)malloc(arr_size * sizeof(uint32_t)); //with with sequential numbers std::iota(h_arr, h_arr + arr_size, 0); //device array uint32_t* d_arr; CUDA_ERROR(hipMalloc((void**)&d_arr, arr_size*sizeof(uint32_t))); CUDA_ERROR(hipMemcpy(d_arr, h_arr, arr_size*sizeof(uint32_t), hipMemcpyHostToDevice)); //launch config const int threads = 80; //following the same steps done in conjugateGradientMultiBlockCG.cu //cuda sample to launch kernel that sync across grid //https://github.com/NVIDIA/cuda-samples/blob/master/Samples/conjugateGradientMultiBlockCG/conjugateGradientMultiBlockCG.cu#L436 int num_blocks_per_sm = 0; CUDA_ERROR(hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, (void*)testing_cg_grid_sync, threads, 0)); dim3 grid_dim(sm_count * num_blocks_per_sm, 1, 1), block_dim(threads, 1, 1); printf("\n Launching %d blcoks, each containing %d threads \n", grid_dim.x, block_dim.x); if(arr_size > grid_dim.x*block_dim.x){ printf("\n The grid size (numBlocks*numThreads) is less than array size.\n"); printf("This will result into mismatch error (incorrect output erro)\n"); exit(EXIT_FAILURE); } if((int(grid_dim.x*block_dim.x) - int(arr_size)) / threads > 0 ){ printf("\n At least one block might not see the sync barrier. 
This will (probabily) result into the code never exits.\n"); exit(EXIT_FAILURE); } //argument passed to the kernel void *kernel_args[] = { (void *)&arr_size, (void *)&d_arr,}; //finally launch the kernel hipLaunchCooperativeKernel((void*)testing_cg_grid_sync, grid_dim, block_dim, kernel_args); //make sure everything went okay CUDA_ERROR(hipGetLastError()); CUDA_ERROR(hipDeviceSynchronize()); //get results on the host CUDA_ERROR(hipMemcpy(h_arr, d_arr, arr_size*sizeof(uint32_t), hipMemcpyDeviceToHost)); //validate for (uint32_t i = 0; i < arr_size; i++){ if (h_arr[i] != arr_size - i - 1){ printf("\n Result mismatch in h_arr[%u] = %u\n", i, h_arr[i]); exit(EXIT_FAILURE); } } } //****************************************************************************** int main(int argc, char**argv) { //set to Titan V uint32_t device_id = 0; hipSetDevice(device_id); //get sm count hipDeviceProp_t devProp; CUDA_ERROR(hipGetDeviceProperties(&devProp, device_id)); int sm_count = devProp.multiProcessorCount; //execute execute_test(sm_count); printf("\n Mission accomplished \n"); return 0; }
732b1073764d3807ac4a30e258da6322a780ab6c.cu
#include <cuda_runtime_api.h> #include <stdio.h> #include <stdint.h> #include <cstdint> #include <numeric> #include <cooperative_groups.h> namespace cg = cooperative_groups; //********************** CUDA_ERROR inline void HandleError(cudaError_t err, const char *file, int line) { //Error handling micro, wrap it around function whenever possible if (err != cudaSuccess) { printf("\n%s in %s at line %d\n", cudaGetErrorString(err), file, line); #ifdef _WIN32 system("pause"); #else exit(EXIT_FAILURE); #endif } } #define CUDA_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //****************************************************************************** //********************** testing cg kernel __global__ void testing_cg_grid_sync(const uint32_t num_elements, uint32_t *d_arr){ uint32_t tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < num_elements){ uint32_t my_element = d_arr[tid]; //to sync across the whole grid cg::grid_group barrier = cg::this_grid(); //to sync within a single block //cg::thread_block barrier = cg::this_thread_block(); //wait for all reads barrier.sync(); uint32_t tar_id = num_elements - tid - 1; d_arr[tar_id] = my_element; } return; } //****************************************************************************** //********************** execute void execute_test(const int sm_count){ //host array //const uint32_t arr_size = 1 << 20; //1M const uint32_t arr_size = 1680*80; uint32_t* h_arr = (uint32_t*)malloc(arr_size * sizeof(uint32_t)); //with with sequential numbers std::iota(h_arr, h_arr + arr_size, 0); //device array uint32_t* d_arr; CUDA_ERROR(cudaMalloc((void**)&d_arr, arr_size*sizeof(uint32_t))); CUDA_ERROR(cudaMemcpy(d_arr, h_arr, arr_size*sizeof(uint32_t), cudaMemcpyHostToDevice)); //launch config const int threads = 80; //following the same steps done in conjugateGradientMultiBlockCG.cu //cuda sample to launch kernel that sync across grid //https://github.com/NVIDIA/cuda-samples/blob/master/Samples/conjugateGradientMultiBlockCG/conjugateGradientMultiBlockCG.cu#L436 int num_blocks_per_sm = 0; CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, (void*)testing_cg_grid_sync, threads, 0)); dim3 grid_dim(sm_count * num_blocks_per_sm, 1, 1), block_dim(threads, 1, 1); printf("\n Launching %d blcoks, each containing %d threads \n", grid_dim.x, block_dim.x); if(arr_size > grid_dim.x*block_dim.x){ printf("\n The grid size (numBlocks*numThreads) is less than array size.\n"); printf("This will result into mismatch error (incorrect output erro)\n"); exit(EXIT_FAILURE); } if((int(grid_dim.x*block_dim.x) - int(arr_size)) / threads > 0 ){ printf("\n At least one block might not see the sync barrier. 
This will (probabily) result into the code never exits.\n"); exit(EXIT_FAILURE); } //argument passed to the kernel void *kernel_args[] = { (void *)&arr_size, (void *)&d_arr,}; //finally launch the kernel cudaLaunchCooperativeKernel((void*)testing_cg_grid_sync, grid_dim, block_dim, kernel_args); //make sure everything went okay CUDA_ERROR(cudaGetLastError()); CUDA_ERROR(cudaDeviceSynchronize()); //get results on the host CUDA_ERROR(cudaMemcpy(h_arr, d_arr, arr_size*sizeof(uint32_t), cudaMemcpyDeviceToHost)); //validate for (uint32_t i = 0; i < arr_size; i++){ if (h_arr[i] != arr_size - i - 1){ printf("\n Result mismatch in h_arr[%u] = %u\n", i, h_arr[i]); exit(EXIT_FAILURE); } } } //****************************************************************************** int main(int argc, char**argv) { //set to Titan V uint32_t device_id = 0; cudaSetDevice(device_id); //get sm count cudaDeviceProp devProp; CUDA_ERROR(cudaGetDeviceProperties(&devProp, device_id)); int sm_count = devProp.multiProcessorCount; //execute execute_test(sm_count); printf("\n Mission accomplished \n"); return 0; }
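Grid-wide sync with cg::this_grid() only works when the kernel is launched cooperatively on a device that supports it, which the test above takes for granted. Here is a small hedged sketch of the capability check that could run before cudaLaunchCooperativeKernel; the helper name and the choice to return a bool are assumptions.

// Sketch: query whether the device allows cooperative (grid-sync) launches.
// Assumption: the caller decides what to do when the answer is false.
#include <cstdio>
#include <cuda_runtime_api.h>

static bool cooperative_launch_supported(int device_id) {
    int supported = 0;
    cudaError_t err = cudaDeviceGetAttribute(&supported,
                                             cudaDevAttrCooperativeLaunch,
                                             device_id);
    if (err != cudaSuccess) {
        printf("cudaDeviceGetAttribute failed: %s\n", cudaGetErrorString(err));
        return false;
    }
    return supported != 0;  // nonzero on devices that can run cooperative kernels
}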
6d36e1eb2ff25ea1ecb79661fd8a7a4b7276b1a7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Natural order, Size 8 Inverse FFT Kernel.
// Written by Jason Groothuis bSc based on work by Vasily Volkov.

#include "codelets.h"

__global__ void oIFFT8_device_ps( float *ps, float2 *worksrc)
{
    int tid = threadIdx.x;
    int bid = blockIdx.y * gridDim.x + blockIdx.x;
    int hi = tid>>3;
    int lo = tid&7;

    worksrc += bid * 512;
    ps += bid * 512;

    float2 a[8];
    __shared__ float2 smem[64*9];

    // load<8>( a, worksrc, 64 );  // Replace Original
    // ... instead loading to shared mem first, avoiding bank conflicts
#pragma unroll
    for (int i=0; i < 8; i++)
        smem[hi*8*9+lo+i*9] = worksrc[i*64+tid]; // Stride 64 input straight to shared memory, transposing 64x8.

    // ...now load the registers from shared mem (faster)
#pragma unroll
    for (int i=0; i < 8; i++)
        a[i] = smem[tid*9+i];

    // IFFT8( a );  // Replace Original , Partial de-macroing gains ~1 GFlop
    IFFT2( a[0], a[4] );
    IFFT2( a[1], a[5] );
    IFFT2( a[2], a[6] );
    IFFT2( a[3], a[7] );

    float a5x = a[5].x;
    float a6x = a[6].x;
    float a7x = a[7].x;
    a[5].x = (a5x-a[5].y)* M_SQRT1_2f;   a[5].y = (a5x+a[5].y)* M_SQRT1_2f;
    a[6].x = -a[6].y;                    a[6].y = a6x;
    a[7].x = (-a7x-a[7].y )* M_SQRT1_2f; a[7].y = ( a7x-a[7].y )* M_SQRT1_2f;

    IFFT4( a[0], a[1], a[2], a[3] );
    IFFT4( a[4], a[5], a[6], a[7] );

    //store directly from the registers or via shared mem, transposing 8x64, becoming natural order power spectrum
#pragma unroll
    for (int i=0; i < 8; i++)
        smem[tid*9+i] = a[rev<8>(i)];

#pragma unroll
    for (int i=0; i < 8; i++)
    {
        float2 freqData = smem[hi*8*9+lo+i*9];
        // workdst[i*64+tid] = smem[hi*8*9+lo+i*9]; // stride 64, ~72 GFlops
        // PowerSpectrum[i] = freqData.x * freqData.x + freqData.y * freqData.y;
        ps[i*64+tid] = freqData.x * freqData.x + freqData.y * freqData.y;
    }
}

extern "C" void oIFFT8ps( float *ps, float2 *worksrc, int batch )
{
    hipLaunchKernelGGL(( oIFFT8_device_ps), dim3(grid2D(batch/64)), dim3(64) , 0, 0, ps, worksrc );
}
6d36e1eb2ff25ea1ecb79661fd8a7a4b7276b1a7.cu
// Natural order, Size 8 Inverse FFT Kernel.
// Written by Jason Groothuis bSc based on work by Vasily Volkov.

#include "codelets.h"

__global__ void oIFFT8_device_ps( float *ps, float2 *worksrc)
{
    int tid = threadIdx.x;
    int bid = blockIdx.y * gridDim.x + blockIdx.x;
    int hi = tid>>3;
    int lo = tid&7;

    worksrc += bid * 512;
    ps += bid * 512;

    float2 a[8];
    __shared__ float2 smem[64*9];

    // load<8>( a, worksrc, 64 );  // Replace Original
    // ... instead loading to shared mem first, avoiding bank conflicts
#pragma unroll
    for (int i=0; i < 8; i++)
        smem[hi*8*9+lo+i*9] = worksrc[i*64+tid]; // Stride 64 input straight to shared memory, transposing 64x8.

    // ...now load the registers from shared mem (faster)
#pragma unroll
    for (int i=0; i < 8; i++)
        a[i] = smem[tid*9+i];

    // IFFT8( a );  // Replace Original , Partial de-macroing gains ~1 GFlop
    IFFT2( a[0], a[4] );
    IFFT2( a[1], a[5] );
    IFFT2( a[2], a[6] );
    IFFT2( a[3], a[7] );

    float a5x = a[5].x;
    float a6x = a[6].x;
    float a7x = a[7].x;
    a[5].x = (a5x-a[5].y)* M_SQRT1_2f;   a[5].y = (a5x+a[5].y)* M_SQRT1_2f;
    a[6].x = -a[6].y;                    a[6].y = a6x;
    a[7].x = (-a7x-a[7].y )* M_SQRT1_2f; a[7].y = ( a7x-a[7].y )* M_SQRT1_2f;

    IFFT4( a[0], a[1], a[2], a[3] );
    IFFT4( a[4], a[5], a[6], a[7] );

    //store directly from the registers or via shared mem, transposing 8x64, becoming natural order power spectrum
#pragma unroll
    for (int i=0; i < 8; i++)
        smem[tid*9+i] = a[rev<8>(i)];

#pragma unroll
    for (int i=0; i < 8; i++)
    {
        float2 freqData = smem[hi*8*9+lo+i*9];
        // workdst[i*64+tid] = smem[hi*8*9+lo+i*9]; // stride 64, ~72 GFlops
        // PowerSpectrum[i] = freqData.x * freqData.x + freqData.y * freqData.y;
        ps[i*64+tid] = freqData.x * freqData.x + freqData.y * freqData.y;
    }
}

extern "C" void oIFFT8ps( float *ps, float2 *worksrc, int batch )
{
    oIFFT8_device_ps<<< grid2D(batch/64), 64 >>>( ps, worksrc );
}
24d18df5467b0b6875f83a56b163fc7e4ecca862.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang) * Xiaomi Corporation (authors: Haowen Qiu) * * See LICENSE for clarification regarding multiple authors */ #include <cstdlib> #include <mutex> // NOLINT #include "k2/csrc/context.h" #include "k2/csrc/log.h" #include "k2/csrc/nvtx.h" namespace k2 { static constexpr std::size_t kAlignment = 64; // TODO(haowen): most of implementations below should be updated later. class CpuContext : public Context { public: CpuContext() = default; ContextPtr GetCpuContext() override { return shared_from_this(); } DeviceType GetDeviceType() const override { return kCpu; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { int32_t ret = posix_memalign(&p, kAlignment, bytes); K2_CHECK_EQ(ret, 0); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCpu; } void Deallocate(void *data, void * /*deleter_context*/) override { free(data); } }; class CudaContext : public Context { public: explicit CudaContext(int32_t gpu_id) : gpu_id_(gpu_id) { if (gpu_id_ != -1) { auto ret = hipSetDevice(gpu_id_); K2_CHECK_CUDA_ERROR(ret); } // TODO(haowen): choose one from available GPUs if gpu_id == -1? // and handle GPU ids from multiple machines. auto ret = hipStreamCreate(&stream_); K2_CHECK_CUDA_ERROR(ret); } ContextPtr GetCpuContext() override { return k2::GetCpuContext(); } DeviceType GetDeviceType() const override { return kCuda; } int32_t GetDeviceId() const override { return gpu_id_; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { auto ret = hipMalloc(&p, bytes); K2_CHECK_CUDA_ERROR(ret); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_; } void Deallocate(void *data, void * /*deleter_context*/) override { auto ret = hipFree(data); K2_CHECK_CUDA_ERROR(ret); } hipStream_t GetCudaStream() const override { return g_stream_override.OverrideStream(stream_); } void Sync() const override { auto ret = hipStreamSynchronize(stream_); K2_CHECK_CUDA_ERROR(ret); } ~CudaContext() { auto ret = hipStreamDestroy(stream_); K2_CHECK_CUDA_ERROR(ret); } private: int32_t gpu_id_; hipStream_t stream_; }; ContextPtr GetCpuContext() { return std::make_shared<CpuContext>(); } ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) { static std::once_flag has_cuda_init_flag; static bool has_cuda = false; std::call_once(has_cuda_init_flag, []() { int n = 0; auto ret = hipGetDeviceCount(&n); if (ret == hipSuccess && n > 0) has_cuda = true; else K2_LOG(WARNING) << "CUDA is not available. Return a CPU context."; }); if (has_cuda) return std::make_shared<CudaContext>(gpu_id); return GetCpuContext(); } } // namespace k2
24d18df5467b0b6875f83a56b163fc7e4ecca862.cu
/** * Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang) * Xiaomi Corporation (authors: Haowen Qiu) * * See LICENSE for clarification regarding multiple authors */ #include <cstdlib> #include <mutex> // NOLINT #include "k2/csrc/context.h" #include "k2/csrc/log.h" #include "k2/csrc/nvtx.h" namespace k2 { static constexpr std::size_t kAlignment = 64; // TODO(haowen): most of implementations below should be updated later. class CpuContext : public Context { public: CpuContext() = default; ContextPtr GetCpuContext() override { return shared_from_this(); } DeviceType GetDeviceType() const override { return kCpu; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { int32_t ret = posix_memalign(&p, kAlignment, bytes); K2_CHECK_EQ(ret, 0); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCpu; } void Deallocate(void *data, void * /*deleter_context*/) override { free(data); } }; class CudaContext : public Context { public: explicit CudaContext(int32_t gpu_id) : gpu_id_(gpu_id) { if (gpu_id_ != -1) { auto ret = cudaSetDevice(gpu_id_); K2_CHECK_CUDA_ERROR(ret); } // TODO(haowen): choose one from available GPUs if gpu_id == -1? // and handle GPU ids from multiple machines. auto ret = cudaStreamCreate(&stream_); K2_CHECK_CUDA_ERROR(ret); } ContextPtr GetCpuContext() override { return k2::GetCpuContext(); } DeviceType GetDeviceType() const override { return kCuda; } int32_t GetDeviceId() const override { return gpu_id_; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { auto ret = cudaMalloc(&p, bytes); K2_CHECK_CUDA_ERROR(ret); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_; } void Deallocate(void *data, void * /*deleter_context*/) override { auto ret = cudaFree(data); K2_CHECK_CUDA_ERROR(ret); } cudaStream_t GetCudaStream() const override { return g_stream_override.OverrideStream(stream_); } void Sync() const override { auto ret = cudaStreamSynchronize(stream_); K2_CHECK_CUDA_ERROR(ret); } ~CudaContext() { auto ret = cudaStreamDestroy(stream_); K2_CHECK_CUDA_ERROR(ret); } private: int32_t gpu_id_; cudaStream_t stream_; }; ContextPtr GetCpuContext() { return std::make_shared<CpuContext>(); } ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) { static std::once_flag has_cuda_init_flag; static bool has_cuda = false; std::call_once(has_cuda_init_flag, []() { int n = 0; auto ret = cudaGetDeviceCount(&n); if (ret == cudaSuccess && n > 0) has_cuda = true; else K2_LOG(WARNING) << "CUDA is not available. Return a CPU context."; }); if (has_cuda) return std::make_shared<CudaContext>(gpu_id); return GetCpuContext(); } } // namespace k2
f0b70ed8751852c0364eadfe86c09ea5ed274de4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <julia.h> #include <hip/hip_runtime.h> __global__ void sequence_gpu(int *d_ptr, int length) { int elemID = blockIdx.x * blockDim.x + threadIdx.x; if (elemID < length) { unsigned int laneid; asm("mov.u32 %0, %%laneid;" : "=r"(laneid)); d_ptr[elemID] = laneid; } } int main(void) { jl_init("/home/guo/julia/usr/lib"); jl_eval_string("println(\"julia init successfully\n\")"); const int N = 1000; size_t size = N*sizeof(int); int *d_ptr,*h_ptr; hipMalloc((void **)&d_ptr, size); hipHostMalloc((void **)&h_ptr,size); dim3 dim_block(64,1); dim3 dim_grid(N/dim_block.x+1,1); hipLaunchKernelGGL(( sequence_gpu), dim3(dim_grid), dim3(dim_block), 0, 0, d_ptr,N); hipMemcpy(h_ptr,d_ptr,size,hipMemcpyDeviceToHost); for (int i=0;i<N;i++) { printf("%d\t",h_ptr[i]); if (0 == i % 10) { printf("\n"); } } printf("\n"); hipFree(d_ptr); hipHostFree(h_ptr); jl_atexit_hook(0); hipDeviceReset(); return 0; }
f0b70ed8751852c0364eadfe86c09ea5ed274de4.cu
#include <stdio.h> #include <assert.h> #include <julia.h> #include <cuda_runtime.h> __global__ void sequence_gpu(int *d_ptr, int length) { int elemID = blockIdx.x * blockDim.x + threadIdx.x; if (elemID < length) { unsigned int laneid; asm("mov.u32 %0, %%laneid;" : "=r"(laneid)); d_ptr[elemID] = laneid; } } int main(void) { jl_init("/home/guo/julia/usr/lib"); jl_eval_string("println(\"julia init successfully\n\")"); const int N = 1000; size_t size = N*sizeof(int); int *d_ptr,*h_ptr; cudaMalloc((void **)&d_ptr, size); cudaMallocHost((void **)&h_ptr,size); dim3 dim_block(64,1); dim3 dim_grid(N/dim_block.x+1,1); sequence_gpu<<<dim_grid, dim_block>>>(d_ptr,N); cudaMemcpy(h_ptr,d_ptr,size,cudaMemcpyDeviceToHost); for (int i=0;i<N;i++) { printf("%d\t",h_ptr[i]); if (0 == i % 10) { printf("\n"); } } printf("\n"); cudaFree(d_ptr); cudaFreeHost(h_ptr); jl_atexit_hook(0); cudaDeviceReset(); return 0; }
808d4940012e0fc7b25a916a93913a6522655f6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include <stdlib.h> #define Nrows 3 #define Ncols 5 #define Nmatrix 4 __global__ void fillMatrix (float *devPtr, size_t pitch, int matrix_type) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < Ncols) { switch (matrix_type) { case 0: { *((float * )((char *) devPtr + pitch * 0) + tid) = 1.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 3.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 5.0f; break; } case 1: { *((float * )((char *) devPtr + pitch * 0) + tid) = 2.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 4.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 6.0f; break; } case 2: { *((float * )((char *) devPtr + pitch * 0) + tid) = 5.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 15.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 30.0f; break; } case 3: { *((float * )((char *) devPtr + pitch * 0) + tid) = 10.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 30.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 90.0f; break; } } } } /********/ /* MAIN */ /********/ int main() { float **hostPtr; float **devPtr; size_t *pitch; hostPtr = (float**)malloc(sizeof(float*)*Nmatrix); for (int i = 0; i < Nmatrix; i++) hostPtr[i] = (float*)malloc(sizeof(float)*Ncols*Nrows); pitch = (size_t*)malloc(sizeof(size_t)*Nmatrix); devPtr = (float**)malloc(sizeof(float*)*Nmatrix); for (int i = 0; i < Nmatrix; i++) hipMallocPitch(&devPtr[i], &pitch[i], Ncols * sizeof(float), Nrows); for (int i = 0; i < Nmatrix; i++) hipLaunchKernelGGL(( fillMatrix), dim3(1),dim3(64), 0, 0, devPtr[i],pitch[i],i); for (int i = 0; i < Nmatrix; i++) hipMemcpy2D(hostPtr[i], Ncols * sizeof(float), devPtr[i], pitch[i], Ncols * sizeof(float), Nrows, hipMemcpyDeviceToHost); for (int k = 0; k < Nmatrix; k++) { printf("=========================================================================\n"); printf("Printing matrix %d\n",k); printf("Pitch = %d\n",pitch[k]); for (int i = 0; i < Nrows; i++) for (int j = 0; j < Ncols; j++) printf("row %i column %i value %f \n", i, j, hostPtr[k][i*Ncols+j]); printf("=========================================================================\n"); } for (int i = 0; i < Nmatrix; i++) { free(hostPtr[i]); hipFree(devPtr[i]); } free(hostPtr); free(devPtr); return 0; }
808d4940012e0fc7b25a916a93913a6522655f6a.cu
#include<stdio.h> #include <stdlib.h> #define Nrows 3 #define Ncols 5 #define Nmatrix 4 __global__ void fillMatrix (float *devPtr, size_t pitch, int matrix_type) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < Ncols) { switch (matrix_type) { case 0: { *((float * )((char *) devPtr + pitch * 0) + tid) = 1.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 3.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 5.0f; break; } case 1: { *((float * )((char *) devPtr + pitch * 0) + tid) = 2.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 4.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 6.0f; break; } case 2: { *((float * )((char *) devPtr + pitch * 0) + tid) = 5.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 15.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 30.0f; break; } case 3: { *((float * )((char *) devPtr + pitch * 0) + tid) = 10.0f; *((float * )((char *) devPtr + pitch * 1) + tid) = 30.0f; *((float * )((char *) devPtr + pitch * 2) + tid) = 90.0f; break; } } } } /********/ /* MAIN */ /********/ int main() { float **hostPtr; float **devPtr; size_t *pitch; hostPtr = (float**)malloc(sizeof(float*)*Nmatrix); for (int i = 0; i < Nmatrix; i++) hostPtr[i] = (float*)malloc(sizeof(float)*Ncols*Nrows); pitch = (size_t*)malloc(sizeof(size_t)*Nmatrix); devPtr = (float**)malloc(sizeof(float*)*Nmatrix); for (int i = 0; i < Nmatrix; i++) cudaMallocPitch(&devPtr[i], &pitch[i], Ncols * sizeof(float), Nrows); for (int i = 0; i < Nmatrix; i++) fillMatrix<<<1,64>>>(devPtr[i],pitch[i],i); for (int i = 0; i < Nmatrix; i++) cudaMemcpy2D(hostPtr[i], Ncols * sizeof(float), devPtr[i], pitch[i], Ncols * sizeof(float), Nrows, cudaMemcpyDeviceToHost); for (int k = 0; k < Nmatrix; k++) { printf("=========================================================================\n"); printf("Printing matrix %d\n",k); printf("Pitch = %d\n",pitch[k]); for (int i = 0; i < Nrows; i++) for (int j = 0; j < Ncols; j++) printf("row %i column %i value %f \n", i, j, hostPtr[k][i*Ncols+j]); printf("=========================================================================\n"); } for (int i = 0; i < Nmatrix; i++) { free(hostPtr[i]); cudaFree(devPtr[i]); } free(hostPtr); free(devPtr); return 0; }
e9e59d9b9c17380aa64d9b3efd1981537c6dfaf6.hip
// !!! This is a file automatically generated by hipify!!! /* Let's do better Efficient Parallel Scan Algorithms for GPUs Shubhabrata Sengupta Davis Mark Harris Michael Garland Parallel Scan for Stream Architectures1 Duane Merrill Andrew Grimshaw Inclusive scan We need to beat the 12ms mark on 2^25 elements (~33m elements) */ // for now everything is power of 2, normally this won't be the case -> padding + if elses #define ARR_SIZE (1 << 25) #define BLOCKSIZE 128 #define LOG2_BLOCKSIZE 7 #define SCAN_SMEM_WIDTH (BLOCKSIZE/32) #define LOG2_SCAN_SMEM_WIDTH (LOG2_BLOCKSIZE - 5) #define WORK_PER_THREAD 8 // 8 float4 each thread on scanning and reducing #define LOG2_WORK_PER_THREAD 5 // 32 elements (8 float4) #define MIDDLE_SCAN_STEP 64 // 2^(25 - 5 - 7 - 7) // -5 (8 float4 loads) - 7 (blocksize) - 7 (each thread of middle scan block) #define PADDING 0 #define SMEM_TOTAL_WIDTH (SCAN_SMEM_WIDTH + 1 + PADDING) #include <hip/hip_runtime.h> #include <iostream> #include <stdlib.h> #include <helper_cuda.h> #include <helper_math.h> /* // SIMT Kogge-Stone scan kernel __device__ __inline__ void scan_warp(volatile float* input, int indx = threadIdx.x){ int lane = indx & 31; if (lane >= 1) input[indx] = input[indx - 1] + input[indx]; if (lane >= 2) input[indx] = input[indx - 2] + input[indx]; if (lane >= 4) input[indx] = input[indx - 4] + input[indx]; if (lane >= 8) input[indx] = input[indx - 8] + input[indx]; if (lane >= 16) input[indx] = input[indx - 16] + input[indx]; } // SIMT Brent-Kung scan kernel - same as the merrill_srts reduction kernel but since it's the same as the warp size -> no need for __syncthreads() // BUT BUT!!!!! since this is SIMT -> there is actually 0 gain from reducing the number of operations , so the scan-warp will be used. // merrill tree reduce __global__ void reduce1(float4 *d_input, float *d_output){ __shared__ float s_data[BLOCKSIZE * 2];//1 cell per thread + another blockdim for easier indx management int idx = blockDim.x * blockIdx.x * WORK_PER_THREAD + threadIdx.x; d_input += idx; d_output += blockIdx.x; float4 item; float sum = 0; #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item = d_input[i * BLOCKSIZE]; sum += item.w + item.x + item.y + item.z; } s_data[threadIdx.x] = sum; __syncthreads(); // we reduce and put the result on the second half of shared memory float *a = s_data; #pragma unroll for(int d = LOG2_BLOCKSIZE; d > 5; d--){ if( threadIdx.x < (1 << (d - 1)) ){ a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1]; } a = &a[(1 << d)]; __syncthreads(); } if((threadIdx.x >> 5) == 0){ scan_warp(s_data); } // output the sum if(threadIdx.x == 0){ d_output[0] = a[31]; } }*/ // the only change is how smem is handled after the serial scan __device__ __inline__ void scan_warp_merrill_srts(volatile float (*s_data)[SMEM_TOTAL_WIDTH], int indx = threadIdx.x){ int lane = indx & 31; s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx][SCAN_SMEM_WIDTH - 1]; // in last column we doing the sums if (lane >= 1) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 1][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 2) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 2][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 4) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 4][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 8) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 8][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 16) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 16][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; } // 
merrill_srts reduce kernel __global__ void reduce(float4 *d_input, float *d_output){ __shared__ float s_data[32][SMEM_TOTAL_WIDTH]; int idx = blockDim.x * blockIdx.x * WORK_PER_THREAD + threadIdx.x; d_input += idx; d_output += blockIdx.x; int row = threadIdx.x >> LOG2_SCAN_SMEM_WIDTH; int col = threadIdx.x & (SCAN_SMEM_WIDTH - 1); float4 item[WORK_PER_THREAD]; float sum[WORK_PER_THREAD]; float total_sum = 0; #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item[i] = d_input[i * BLOCKSIZE]; sum[i] = item[i].x + item[i].y + item[i].z + item[i].w; } #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ s_data[row][col] = sum[i]; __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int i = 1; i < SCAN_SMEM_WIDTH; i++){ s_data[threadIdx.x][i] += s_data[threadIdx.x][i - 1]; } scan_warp_merrill_srts(s_data); } if(threadIdx.x == 0){ total_sum += s_data[31][SCAN_SMEM_WIDTH]; } } if(threadIdx.x == 0){ d_output[0] = total_sum; } /*float4 item; float sum = 0; #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item = d_input[i * BLOCKSIZE]; sum += item.x + item.y + item.z + item.w; } s_data[row][col] = sum; __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int i = 1; i < SCAN_SMEM_WIDTH; i++){ s_data[threadIdx.x][i] += s_data[threadIdx.x][i - 1]; } scan_warp_merrill_srts(s_data); } if(threadIdx.x == 0){ d_output[0] = s_data[31][SCAN_SMEM_WIDTH]; }*/ } // merrill_srts scan kernel __global__ void scan(float4 *d_input, float *seeds, float4 *d_output){ __shared__ float s_data[32][SMEM_TOTAL_WIDTH]; int idx = blockDim.x * blockIdx.x * WORK_PER_THREAD + threadIdx.x; d_input += idx; d_output += idx; int row = threadIdx.x >> LOG2_SCAN_SMEM_WIDTH; int col = threadIdx.x & (SCAN_SMEM_WIDTH - 1); float4 item[WORK_PER_THREAD]; float seed = 0; // hopefully with this, it will start the requests for the loads for the tiles i+1 while working on tile i #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item[i] = d_input[i * BLOCKSIZE]; item[i].y += item[i].x; item[i].z += item[i].y; item[i].w += item[i].z; } #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ if(threadIdx.x == 0 && blockIdx.x > 0 && i==0){ item[i] += seeds[blockIdx.x - 1]; } item[i] += seed; // only thread 0 adds seed here s_data[row][col] = item[i].w; __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int j = 1; j < SCAN_SMEM_WIDTH; j++){ s_data[threadIdx.x][j] += s_data[threadIdx.x][j - 1]; } scan_warp_merrill_srts(s_data); } __syncthreads(); // add the SIMT scan seeds // sum last column of simt scan if(row > 0){ item[i] += s_data[row - 1][SCAN_SMEM_WIDTH]; } // sum element before in row, serial scan if(col > 0){ item[i] += s_data[row][col - 1]; } if(threadIdx.x == 0){ seed = s_data[31][SCAN_SMEM_WIDTH]; } d_output[i * BLOCKSIZE] = item[i]; } /*float4 item; float seed = 0; for(int i = 0 ; i < WORK_PER_THREAD; i++){ item = d_input[i * BLOCKSIZE]; if(threadIdx.x == 0 && blockIdx.x > 0 && i==0){ item.x += seeds[blockIdx.x - 1]; } item.x += seed; // only thread 0 adds seed here item.y += item.x; item.z += item.y; item.w += item.z; s_data[row][col] = item.w; __syncthreads(); // serial reduce if((threadIdx.x >> 5) == 0){ #pragma unroll for(int j = 1; j < SCAN_SMEM_WIDTH; j++){ s_data[threadIdx.x][j] += s_data[threadIdx.x][j - 1]; } scan_warp_merrill_srts(s_data); } __syncthreads(); // add the SIMT scan seeds // sum last column of simt scan if(row > 0){ item += s_data[row - 1][SCAN_SMEM_WIDTH]; } // sum element before in row, serial scan if(col > 0){ item += s_data[row][col - 1]; } 
if(threadIdx.x == 0){ seed = s_data[31][SCAN_SMEM_WIDTH]; } d_output[i * BLOCKSIZE] = item; }*/ } // two level reduce then scan - middle scan kernel __global__ void middle_scan(float *seeds){ __shared__ float s_data[32][SMEM_TOTAL_WIDTH]; int row = threadIdx.x >> LOG2_SCAN_SMEM_WIDTH; int col = threadIdx.x & (SCAN_SMEM_WIDTH - 1); float seed = 0; seeds += threadIdx.x; // cyclically scan the reduced sums #pragma unroll for(int i = 0; i < MIDDLE_SCAN_STEP; i++){ s_data[row][col] = seeds[i * BLOCKSIZE] + seed; // only thread 0 adds seed here __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int j = 1; j < SCAN_SMEM_WIDTH; j++){ s_data[threadIdx.x][j] += s_data[threadIdx.x][j - 1]; } scan_warp_merrill_srts(s_data); } if(threadIdx.x == 0){ seed = s_data[31][SCAN_SMEM_WIDTH]; } __syncthreads(); if(threadIdx.x >= SCAN_SMEM_WIDTH){ seeds[i * BLOCKSIZE] = s_data[row][col] + s_data[row - 1][SCAN_SMEM_WIDTH]; } else { seeds[i * BLOCKSIZE] = s_data[0][threadIdx.x]; } } } // main + interface void cuda_interface_scan(float4* d_input, float4* d_output){ int temp = ARR_SIZE >> (LOG2_WORK_PER_THREAD + LOG2_BLOCKSIZE); // each thread processes 8 float4 dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(temp); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float total_time = 0; float elapsed_time; float *d_scan; hipMalloc((void **)&d_scan, temp * sizeof(float)); hipEventRecord(start, 0); hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_scan); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf( "reduce: %.8f ms\n", elapsed_time); total_time += elapsed_time; hipEventRecord(start, 0); hipLaunchKernelGGL(( middle_scan), dim3(1), dim3(dimBlock), 0, 0, d_scan); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf( "middle scan: %.8f ms\n", elapsed_time); total_time += elapsed_time; hipEventRecord(start, 0); hipLaunchKernelGGL(( scan), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_scan, d_output); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf( "final scan: %.8f ms\n", elapsed_time); total_time += elapsed_time; printf("total time GPU %.8fms\n", total_time); hipFree(d_scan); hipEventDestroy(start); hipEventDestroy(stop); } void fill_array(float4 *h_input){ float *temp = (float*) h_input; for(int i = 0; i < ARR_SIZE; i++){ temp[i] = (float) rand() / RAND_MAX; } } void check(float4 *h_input, float4 *h_output){ float *temp1 = (float*) h_input; float *temp2 = (float*) h_output; float *temp3 = (float*) malloc(ARR_SIZE * sizeof(float)); temp3[0] = temp1[0]; for(int i = 1; i < ARR_SIZE; i++){ temp3[i] = temp1[i] + temp3[i - 1]; } std::cout<<"first 1050 elements:\n"; std::cout<<"element"<<"\tcpu"<<"\tgpu\n"; for(int i = 0; i < 1050; i++){ std::cout<<i<<"\t"<<temp1[i] << "\t" << temp3[i] << "\t" << temp2[i] <<"\n"; } free(temp3); } int main(void){ srand(0); float4 *h_input, *h_output; float4 *d_input, *d_output; h_input = (float4*) malloc(ARR_SIZE * sizeof(float)); h_output = (float4*) malloc(ARR_SIZE * sizeof(float)); fill_array(h_input); hipMalloc((void **)&d_input, ARR_SIZE * sizeof(float)); hipMalloc((void **)&d_output, ARR_SIZE * sizeof(float)); hipMemcpy(d_input, h_input, ARR_SIZE * sizeof(float), hipMemcpyHostToDevice); cuda_interface_scan(d_input, d_output); hipMemcpy(h_output, 
d_output, ARR_SIZE * sizeof(float), hipMemcpyDeviceToHost); check(h_input, h_output); hipFree(d_input); hipFree(d_output); free(h_input); free(h_output); return 0; }
e9e59d9b9c17380aa64d9b3efd1981537c6dfaf6.cu
/* Let's do better Efficient Parallel Scan Algorithms for GPUs Shubhabrata Sengupta Davis Mark Harris Michael Garland Parallel Scan for Stream Architectures1 Duane Merrill Andrew Grimshaw Inclusive scan We need to beat the 12ms mark on 2^25 elements (~33m elements) */ // for now everything is power of 2, normally this won't be the case -> padding + if elses #define ARR_SIZE (1 << 25) #define BLOCKSIZE 128 #define LOG2_BLOCKSIZE 7 #define SCAN_SMEM_WIDTH (BLOCKSIZE/32) #define LOG2_SCAN_SMEM_WIDTH (LOG2_BLOCKSIZE - 5) #define WORK_PER_THREAD 8 // 8 float4 each thread on scanning and reducing #define LOG2_WORK_PER_THREAD 5 // 32 elements (8 float4) #define MIDDLE_SCAN_STEP 64 // 2^(25 - 5 - 7 - 7) // -5 (8 float4 loads) - 7 (blocksize) - 7 (each thread of middle scan block) #define PADDING 0 #define SMEM_TOTAL_WIDTH (SCAN_SMEM_WIDTH + 1 + PADDING) #include <cuda_runtime.h> #include <iostream> #include <stdlib.h> #include <helper_cuda.h> #include <helper_math.h> /* // SIMT Kogge-Stone scan kernel __device__ __inline__ void scan_warp(volatile float* input, int indx = threadIdx.x){ int lane = indx & 31; if (lane >= 1) input[indx] = input[indx - 1] + input[indx]; if (lane >= 2) input[indx] = input[indx - 2] + input[indx]; if (lane >= 4) input[indx] = input[indx - 4] + input[indx]; if (lane >= 8) input[indx] = input[indx - 8] + input[indx]; if (lane >= 16) input[indx] = input[indx - 16] + input[indx]; } // SIMT Brent-Kung scan kernel - same as the merrill_srts reduction kernel but since it's the same as the warp size -> no need for __syncthreads() // BUT BUT!!!!! since this is SIMT -> there is actually 0 gain from reducing the number of operations , so the scan-warp will be used. // merrill tree reduce __global__ void reduce1(float4 *d_input, float *d_output){ __shared__ float s_data[BLOCKSIZE * 2];//1 cell per thread + another blockdim for easier indx management int idx = blockDim.x * blockIdx.x * WORK_PER_THREAD + threadIdx.x; d_input += idx; d_output += blockIdx.x; float4 item; float sum = 0; #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item = d_input[i * BLOCKSIZE]; sum += item.w + item.x + item.y + item.z; } s_data[threadIdx.x] = sum; __syncthreads(); // we reduce and put the result on the second half of shared memory float *a = s_data; #pragma unroll for(int d = LOG2_BLOCKSIZE; d > 5; d--){ if( threadIdx.x < (1 << (d - 1)) ){ a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1]; } a = &a[(1 << d)]; __syncthreads(); } if((threadIdx.x >> 5) == 0){ scan_warp(s_data); } // output the sum if(threadIdx.x == 0){ d_output[0] = a[31]; } }*/ // the only change is how smem is handled after the serial scan __device__ __inline__ void scan_warp_merrill_srts(volatile float (*s_data)[SMEM_TOTAL_WIDTH], int indx = threadIdx.x){ int lane = indx & 31; s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx][SCAN_SMEM_WIDTH - 1]; // in last column we doing the sums if (lane >= 1) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 1][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 2) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 2][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 4) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 4][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 8) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 8][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; if (lane >= 16) s_data[indx][SCAN_SMEM_WIDTH] = s_data[indx - 16][SCAN_SMEM_WIDTH] + s_data[indx][SCAN_SMEM_WIDTH]; } // merrill_srts reduce kernel __global__ void reduce(float4 
*d_input, float *d_output){ __shared__ float s_data[32][SMEM_TOTAL_WIDTH]; int idx = blockDim.x * blockIdx.x * WORK_PER_THREAD + threadIdx.x; d_input += idx; d_output += blockIdx.x; int row = threadIdx.x >> LOG2_SCAN_SMEM_WIDTH; int col = threadIdx.x & (SCAN_SMEM_WIDTH - 1); float4 item[WORK_PER_THREAD]; float sum[WORK_PER_THREAD]; float total_sum = 0; #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item[i] = d_input[i * BLOCKSIZE]; sum[i] = item[i].x + item[i].y + item[i].z + item[i].w; } #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ s_data[row][col] = sum[i]; __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int i = 1; i < SCAN_SMEM_WIDTH; i++){ s_data[threadIdx.x][i] += s_data[threadIdx.x][i - 1]; } scan_warp_merrill_srts(s_data); } if(threadIdx.x == 0){ total_sum += s_data[31][SCAN_SMEM_WIDTH]; } } if(threadIdx.x == 0){ d_output[0] = total_sum; } /*float4 item; float sum = 0; #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item = d_input[i * BLOCKSIZE]; sum += item.x + item.y + item.z + item.w; } s_data[row][col] = sum; __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int i = 1; i < SCAN_SMEM_WIDTH; i++){ s_data[threadIdx.x][i] += s_data[threadIdx.x][i - 1]; } scan_warp_merrill_srts(s_data); } if(threadIdx.x == 0){ d_output[0] = s_data[31][SCAN_SMEM_WIDTH]; }*/ } // merrill_srts scan kernel __global__ void scan(float4 *d_input, float *seeds, float4 *d_output){ __shared__ float s_data[32][SMEM_TOTAL_WIDTH]; int idx = blockDim.x * blockIdx.x * WORK_PER_THREAD + threadIdx.x; d_input += idx; d_output += idx; int row = threadIdx.x >> LOG2_SCAN_SMEM_WIDTH; int col = threadIdx.x & (SCAN_SMEM_WIDTH - 1); float4 item[WORK_PER_THREAD]; float seed = 0; // hopefully with this, it will start the requests for the loads for the tiles i+1 while working on tile i #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ item[i] = d_input[i * BLOCKSIZE]; item[i].y += item[i].x; item[i].z += item[i].y; item[i].w += item[i].z; } #pragma unroll for(int i = 0; i < WORK_PER_THREAD; i++){ if(threadIdx.x == 0 && blockIdx.x > 0 && i==0){ item[i] += seeds[blockIdx.x - 1]; } item[i] += seed; // only thread 0 adds seed here s_data[row][col] = item[i].w; __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int j = 1; j < SCAN_SMEM_WIDTH; j++){ s_data[threadIdx.x][j] += s_data[threadIdx.x][j - 1]; } scan_warp_merrill_srts(s_data); } __syncthreads(); // add the SIMT scan seeds // sum last column of simt scan if(row > 0){ item[i] += s_data[row - 1][SCAN_SMEM_WIDTH]; } // sum element before in row, serial scan if(col > 0){ item[i] += s_data[row][col - 1]; } if(threadIdx.x == 0){ seed = s_data[31][SCAN_SMEM_WIDTH]; } d_output[i * BLOCKSIZE] = item[i]; } /*float4 item; float seed = 0; for(int i = 0 ; i < WORK_PER_THREAD; i++){ item = d_input[i * BLOCKSIZE]; if(threadIdx.x == 0 && blockIdx.x > 0 && i==0){ item.x += seeds[blockIdx.x - 1]; } item.x += seed; // only thread 0 adds seed here item.y += item.x; item.z += item.y; item.w += item.z; s_data[row][col] = item.w; __syncthreads(); // serial reduce if((threadIdx.x >> 5) == 0){ #pragma unroll for(int j = 1; j < SCAN_SMEM_WIDTH; j++){ s_data[threadIdx.x][j] += s_data[threadIdx.x][j - 1]; } scan_warp_merrill_srts(s_data); } __syncthreads(); // add the SIMT scan seeds // sum last column of simt scan if(row > 0){ item += s_data[row - 1][SCAN_SMEM_WIDTH]; } // sum element before in row, serial scan if(col > 0){ item += s_data[row][col - 1]; } if(threadIdx.x == 0){ seed = s_data[31][SCAN_SMEM_WIDTH]; } 
d_output[i * BLOCKSIZE] = item; }*/ } // two level reduce then scan - middle scan kernel __global__ void middle_scan(float *seeds){ __shared__ float s_data[32][SMEM_TOTAL_WIDTH]; int row = threadIdx.x >> LOG2_SCAN_SMEM_WIDTH; int col = threadIdx.x & (SCAN_SMEM_WIDTH - 1); float seed = 0; seeds += threadIdx.x; // cyclically scan the reduced sums #pragma unroll for(int i = 0; i < MIDDLE_SCAN_STEP; i++){ s_data[row][col] = seeds[i * BLOCKSIZE] + seed; // only thread 0 adds seed here __syncthreads(); if((threadIdx.x >> 5) == 0){ #pragma unroll for(int j = 1; j < SCAN_SMEM_WIDTH; j++){ s_data[threadIdx.x][j] += s_data[threadIdx.x][j - 1]; } scan_warp_merrill_srts(s_data); } if(threadIdx.x == 0){ seed = s_data[31][SCAN_SMEM_WIDTH]; } __syncthreads(); if(threadIdx.x >= SCAN_SMEM_WIDTH){ seeds[i * BLOCKSIZE] = s_data[row][col] + s_data[row - 1][SCAN_SMEM_WIDTH]; } else { seeds[i * BLOCKSIZE] = s_data[0][threadIdx.x]; } } } // main + interface void cuda_interface_scan(float4* d_input, float4* d_output){ int temp = ARR_SIZE >> (LOG2_WORK_PER_THREAD + LOG2_BLOCKSIZE); // each thread processes 8 float4 dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(temp); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float total_time = 0; float elapsed_time; float *d_scan; cudaMalloc((void **)&d_scan, temp * sizeof(float)); cudaEventRecord(start, 0); reduce<<<dimGrid, dimBlock>>>(d_input, d_scan); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf( "reduce: %.8f ms\n", elapsed_time); total_time += elapsed_time; cudaEventRecord(start, 0); middle_scan<<<1, dimBlock>>>(d_scan); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf( "middle scan: %.8f ms\n", elapsed_time); total_time += elapsed_time; cudaEventRecord(start, 0); scan<<<dimGrid, dimBlock>>>(d_input, d_scan, d_output); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf( "final scan: %.8f ms\n", elapsed_time); total_time += elapsed_time; printf("total time GPU %.8fms\n", total_time); cudaFree(d_scan); cudaEventDestroy(start); cudaEventDestroy(stop); } void fill_array(float4 *h_input){ float *temp = (float*) h_input; for(int i = 0; i < ARR_SIZE; i++){ temp[i] = (float) rand() / RAND_MAX; } } void check(float4 *h_input, float4 *h_output){ float *temp1 = (float*) h_input; float *temp2 = (float*) h_output; float *temp3 = (float*) malloc(ARR_SIZE * sizeof(float)); temp3[0] = temp1[0]; for(int i = 1; i < ARR_SIZE; i++){ temp3[i] = temp1[i] + temp3[i - 1]; } std::cout<<"first 1050 elements:\n"; std::cout<<"element"<<"\tcpu"<<"\tgpu\n"; for(int i = 0; i < 1050; i++){ std::cout<<i<<"\t"<<temp1[i] << "\t" << temp3[i] << "\t" << temp2[i] <<"\n"; } free(temp3); } int main(void){ srand(0); float4 *h_input, *h_output; float4 *d_input, *d_output; h_input = (float4*) malloc(ARR_SIZE * sizeof(float)); h_output = (float4*) malloc(ARR_SIZE * sizeof(float)); fill_array(h_input); cudaMalloc((void **)&d_input, ARR_SIZE * sizeof(float)); cudaMalloc((void **)&d_output, ARR_SIZE * sizeof(float)); cudaMemcpy(d_input, h_input, ARR_SIZE * sizeof(float), cudaMemcpyHostToDevice); cuda_interface_scan(d_input, d_output); cudaMemcpy(h_output, d_output, ARR_SIZE * sizeof(float), cudaMemcpyDeviceToHost); check(h_input, h_output); cudaFree(d_input); cudaFree(d_output); free(h_input); 
free(h_output); return 0; }
c47825dd2d3ace602e463337e9aa42fb471179e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include "../include/solver.cuh" # include "../opencv_headers/opencv2/opencv.hpp" # include "../include/timer.cuh" # include "../include/tinyexr.h" const int fftRank = 1; static const int m = 3; static __constant__ size_t nPhiGlobal; static __constant__ size_t nThetaGlobal; __global__ void initMapping(fReal* map_theta, fReal* map_phi){ // Index int splitVal = (nPhiGlobal + blockDim.x - 1) / blockDim.x; int threadSequence = blockIdx.x % splitVal; int phiId = threadIdx.x + threadSequence * blockDim.x; int thetaId = blockIdx.x / splitVal; if (phiId >= nPhiGlobal) return; map_theta[thetaId * nPhiGlobal + phiId] = (fReal)thetaId + centeredThetaOffset; map_phi[thetaId * nPhiGlobal + phiId] = (fReal)phiId + centeredPhiOffset; } __global__ void initLinearSystem(int* row_ptr, int* col_ind) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > nPhiGlobal * nThetaGlobal) return; int idx5 = 5 * idx; row_ptr[idx] = idx5; if (idx < nPhiGlobal * nThetaGlobal) { // up if (idx < nPhiGlobal) { // first row col_ind[idx5] = (idx + nThetaGlobal) % nPhiGlobal; } else { col_ind[idx5] = idx - nPhiGlobal; } // left col_ind[idx5 + 1] = (idx % nPhiGlobal) == 0 ? idx + nPhiGlobal - 1 : idx - 1; // center col_ind[idx5 + 2] = idx; // right col_ind[idx5 + 3] = (idx % nPhiGlobal) == (nPhiGlobal - 1) ? idx - nPhiGlobal + 1 : idx + 1; // down if (idx >= (nThetaGlobal - 1) * nPhiGlobal + nThetaGlobal) { // last half of the last row col_ind[idx5 + 4] = idx - nThetaGlobal; } else if (idx >= (nThetaGlobal - 1) * nPhiGlobal) { // first half of the last row col_ind[idx5 + 4] = idx + nThetaGlobal; } else { col_ind[idx5 + 4] = idx + nPhiGlobal; } } } Solver::Solver(size_t nPhi, size_t nTheta, fReal radius, fReal dt, fReal H, int device, std::string AMGconfig) : nPhi(nPhi), nTheta(nTheta), radius(radius), invRadius(1.0/radius), gridLen(M_2PI / nPhi), invGridLen(1.0 / gridLen), timeStep(dt), timeElapsed(0.0), advectionTime(0.0), bodyforceTime(0.0), CGTime(0.0), H(H), epsilon(H/radius), N(nPhi*nTheta), nz(5*N), device(device) { /// FIXME: Should we detect and use device 0? /// Replace it later with functions from helper_cuda.h! 
checkCudaErrors(hipSetDevice(device)); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, device)); // otherwise no enough resource this->nThreadxMax = ::min(deviceProp.maxThreadsDim[0], 256); checkCudaErrors(hipMalloc(&div, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&uair, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&vair, (N - nPhi) * sizeof(fReal))); checkCudaErrors(hipMalloc(&uair_init, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&vair_init, (N - nPhi) * sizeof(fReal))); checkCudaErrors(hipMalloc(&fu, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&fv, (N - nPhi) * sizeof(fReal))); checkCudaErrors(hipMalloc(&row_ptr, (N + 1) * sizeof(int))); checkCudaErrors(hipMalloc(&col_ind, nz * sizeof(int))); checkCudaErrors(hipMalloc(&rhs, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&val, nz * sizeof(fReal))); checkCudaErrors(hipMalloc(&d_x, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&d_r, N * sizeof(fReal))); checkCudaErrors(hipMemcpyToSymbol(nPhiGlobal, &(this->nPhi), sizeof(size_t))); checkCudaErrors(hipMemcpyToSymbol(nThetaGlobal, &(this->nTheta), sizeof(size_t))); this->velPhi = new BimocqQuantity("velPhi", nPhi, nTheta, vPhiPhiOffset, vPhiThetaOffset); this->velTheta = new BimocqQuantity("velTheta", nPhi, nTheta - 1, vThetaPhiOffset, vThetaThetaOffset); this->thickness = new BimocqQuantity("eta", nPhi, nTheta, centeredPhiOffset, centeredThetaOffset); this->concentration = new BimocqQuantity("gamma", nPhi, nTheta, centeredPhiOffset, centeredThetaOffset); this->pitch = concentration->getThisStepPitchInElements(); initWithConst(this->velPhi, 0.0); initWithConst(this->velTheta, 0.0); // initialize_velocity(); initWithConst(this->thickness, 0.5); initWithConst(this->concentration, 1.0); dim3 gridLayout; dim3 blockLayout; determineLayout(gridLayout, blockLayout, 1, N + 1); hipLaunchKernelGGL(( initLinearSystem), dim3(gridLayout), dim3(blockLayout), 0, 0, row_ptr, col_ind); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); /* AMGX */ int devices[] = {device}; const char *AMGconfigFile = AMGconfig.c_str(); AMGX_initialize(); AMGX_initialize_plugins(); AMGX_config_create_from_file(&cfg, AMGconfigFile); AMGX_resources_create(&res, cfg, NULL, 1, devices); # ifdef USEFLOAT mode = AMGX_mode_dFFI; # else mode = AMGX_mode_dDDI; # endif AMGX_matrix_create(&A, res, mode); AMGX_vector_create(&b, res, mode); AMGX_vector_create(&x, res, mode); AMGX_solver_create(&solver, res, mode, cfg); /* Bimocq mapping buffers */ checkCudaErrors(hipMalloc(&forward_p, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&forward_t, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&backward_p, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&backward_t, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&backward_pprev, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&backward_tprev, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&tmp_p, N * sizeof(fReal))); checkCudaErrors(hipMalloc(&tmp_t, N * sizeof(fReal))); determineLayout(gridLayout, blockLayout, nTheta, nPhi); hipLaunchKernelGGL(( initMapping), dim3(gridLayout), dim3(blockLayout), 0, 0, forward_t, forward_p); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(backward_p, forward_p, N * sizeof(fReal), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(backward_pprev, forward_p, N * sizeof(fReal), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(backward_t, forward_t, N * sizeof(fReal), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(backward_tprev, 
forward_t, N * sizeof(fReal), hipMemcpyDeviceToDevice)); } Solver::~Solver() { checkCudaErrors(hipFree(div)); checkCudaErrors(hipFree(uair)); checkCudaErrors(hipFree(vair)); checkCudaErrors(hipFree(uair_init)); checkCudaErrors(hipFree(vair_init)); checkCudaErrors(hipFree(fu)); checkCudaErrors(hipFree(fv)); checkCudaErrors(hipFree(row_ptr)); checkCudaErrors(hipFree(col_ind)); checkCudaErrors(hipFree(rhs)); checkCudaErrors(hipFree(val)); checkCudaErrors(hipFree(d_x)); checkCudaErrors(hipFree(d_r)); delete this->velPhi; delete this->velTheta; delete this->thickness; delete this->concentration; /* AMGX */ AMGX_solver_destroy(solver); AMGX_vector_destroy(b); AMGX_vector_destroy(x); AMGX_matrix_destroy(A); AMGX_resources_destroy(res); AMGX_config_destroy(cfg); AMGX_finalize_plugins(); AMGX_finalize(); /* Bimocq mapping buffers */ checkCudaErrors(hipFree(forward_p)); checkCudaErrors(hipFree(forward_t)); checkCudaErrors(hipFree(backward_p)); checkCudaErrors(hipFree(backward_t)); checkCudaErrors(hipFree(backward_pprev)); checkCudaErrors(hipFree(backward_tprev)); checkCudaErrors(hipFree(tmp_p)); checkCudaErrors(hipFree(tmp_t)); checkCudaErrors(hipDeviceReset()); # ifdef PERFORMANCE_BENCHMARK fReal totalTimeUsed = this->advectionTime + this->bodyforceTime; std::cout << "Total time used for advection : " << this->advectionTime << std::endl; std::cout << "Total time used for body force : " << this->bodyforceTime << std::endl; std::cout << "Total time used for CG : " << CGTime << std::endl; std::cout << "Percentage of advection : " << advectionTime / totalTimeUsed * 100.0f << "%" << std::endl; std::cout << "Percentage of bodyforce : " << bodyforceTime / totalTimeUsed * 100.0f << "%" << std::endl; std::cout << "Percentage of CG / bodyforce : " << CGTime / bodyforceTime * 100.0f << "%" << std::endl; std::cout << "Elapsed time " << this->timeElapsed << std::endl; # endif } void Solver::copyVelocity2GPU() { velPhi->copyToGPU(); velTheta->copyToGPU(); } void Solver::determineLayout(dim3& gridLayout, dim3& blockLayout, size_t nTheta_row, size_t nPhi_col) { if (nPhi_col <= this->nThreadxMax) { gridLayout = dim3(nTheta_row); blockLayout = dim3(nPhi_col); } else { int splitVal = (nPhi_col + nThreadxMax - 1) / nThreadxMax; gridLayout = dim3(nTheta_row * splitVal); blockLayout = dim3(nThreadxMax); } } void Solver::copyToCPU(Quantity* quantity, fReal* cpubuffer) { checkCudaErrors(hipMemcpy2D((void*)cpubuffer, quantity->getNPhi() * sizeof(fReal), (void*)quantity->getGPUThisStep(), quantity->getThisStepPitchInElements() * sizeof(fReal), quantity->getNPhi() * sizeof(fReal), quantity->getNTheta(), hipMemcpyDeviceToHost)); } void Solver::stepForward(fReal dt) { fReal dt_ = this->timeStep; this->timeStep = dt; stepForward(); this->timeStep = dt_; } void Solver::stepForward() { # ifdef PERFORMANCE_BENCHMARK Timer timer; timer.startTimer(); # endif # ifdef BIMOCQ // updateCFL(); updateForward(this->timeStep, forward_t, forward_p); updateBackward(this->timeStep, backward_t, backward_p); # endif advection(); # ifdef PERFORMANCE_BENCHMARK this->advectionTime += timer.stopTimer() * 0.001f; timer.startTimer(); # endif bodyforce(); # ifdef PERFORMANCE_BENCHMARK this->bodyforceTime += timer.stopTimer() * 0.001f; # endif # ifdef BIMOCQ fReal distortion = estimateDistortion(); std::cout << "max distortion " << distortion << std::endl; // can be adjusted empirically if (distortion > 4.f) { reInitializeMapping(); std::cout << "mapping reinitialized" << std::endl; } # endif this->timeElapsed += this->timeStep; count++; } void 
Solver::swapVelocityBuffers() { this->velPhi->swapGPUBuffer(); this->velTheta->swapGPUBuffer(); } void Solver::copyVelocityBack2CPU() { this->velPhi->copyBackToCPU(); this->velTheta->copyBackToCPU(); // check if film is broken setBroken(true); for (size_t i = 0; i < this->velPhi->getNPhi(); ++i) { for (size_t j = 0; j < this->velPhi->getNTheta(); ++j) { fReal val = this->velPhi->getCPUValueAt(i, j); if (!isnan(val)) { setBroken(false); goto finish; } } } for (size_t i = 0; i < this->velTheta->getNPhi(); ++i) { for (size_t j = 0; j < this->velTheta->getNTheta(); ++j) { fReal val = this->velTheta->getCPUValueAt(i, j); if (!isnan(val)) { setBroken(false); goto finish; } } } finish: } bool Solver::isBroken() { return this->broken; } void Solver::setBroken(bool broken) { this->broken = broken; } void Solver::write_image(const std::string& s, size_t width, size_t height, std::vector<float> *images) { const char *filename = s.c_str(); EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = 3; float* image_ptr[3]; image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R image.images = (unsigned char**)image_ptr; image.width = width; // image.height = nTheta; image.height = height; header.num_channels = 3; header.channels = (EXRChannelInfo *)malloc(sizeof(EXRChannelInfo) * header.num_channels); // Must be (A)BGR order, since most of EXR viewers expect this channel order. strncpy(header.channels[0].name, "B", 255); header.channels[0].name[strlen("B")] = '\0'; strncpy(header.channels[1].name, "G", 255); header.channels[1].name[strlen("G")] = '\0'; strncpy(header.channels[2].name, "R", 255); header.channels[2].name[strlen("R")] = '\0'; header.pixel_types = (int *)malloc(sizeof(int) * header.num_channels); header.requested_pixel_types = (int *)malloc(sizeof(int) * header.num_channels); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // pixel type of output image to be stored in .EXR } const char* err = NULL; // or nullptr in C++11 or later. int ret = SaveEXRImageToFile(&image, &header, filename, &err); if (ret != TINYEXR_SUCCESS) { fprintf(stderr, "Save EXR err: %s\n", err); FreeEXRErrorMessage(err); // free's buffer for an error message return; } printf("Saved exr file. 
[ %s ] \n", filename); free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); } void Solver::write_velocity_image(const std::string& s, const int frame) { std::string img_string = std::to_string(frame); while (img_string.length() < 4) { img_string.insert(0, "0"); } img_string.insert(0, s + "vel"); # ifdef WRITE_TXT std::string u_string = img_string; std::string v_string = img_string; u_string.append("u.txt"); v_string.append("v.txt"); std::ofstream ofu(u_string); std::ofstream ofv(v_string); # endif img_string.append(".exr"); copyVelocityBack2CPU(); std::vector<float> images[3]; images[0].resize(nPhi * nTheta); images[1].resize(nPhi * nTheta); images[2].resize(nPhi * nTheta); fReal maxu = std::numeric_limits<fReal>::min(); fReal maxv = std::numeric_limits<fReal>::min(); fReal minu = std::numeric_limits<fReal>::max(); fReal minv = std::numeric_limits<fReal>::max(); size_t maxuthetaid = 0; size_t maxuphiid = 0; size_t minuthetaid = 0; size_t minuphiid = 0; size_t maxvthetaid = 0; size_t maxvphiid = 0; size_t minvthetaid = 0; size_t minvphiid = 0; for (size_t j = 0; j < nTheta; ++j) { for (size_t i = 0; i < nPhi; ++i) { fReal uW = velPhi->getCPUValueAt(i, j); fReal uE; fReal vN; fReal vS; if (i != nPhi - 1) { uE = velPhi->getCPUValueAt(i + 1, j); } else { uE = velPhi->getCPUValueAt(0, j); } if (j != 0) { vN = velTheta->getCPUValueAt(i, j - 1); } else { size_t oppositei = (i + nPhi/2) % nPhi; vN = 0.75 * velTheta->getCPUValueAt(i, j) - 0.25 * velTheta->getCPUValueAt(oppositei, j); } if (j != nTheta - 1) { vS = velTheta->getCPUValueAt(i, j); // ofv << vS << " "; } else { size_t oppositei = (i + nPhi/2) % nPhi; vS = 0.75 * velTheta->getCPUValueAt(i, j - 1) - 0.25 * velTheta->getCPUValueAt(oppositei, j - 1); } fReal u = 0.5 * (uW + uE); fReal v = 0.5 * (vN + vS); # ifdef WRITE_TXT ofu << u << " "; ofv << v << " "; # endif if (u > maxu) { maxu = u; maxuthetaid = j; maxuphiid = i; } if (u < minu) { minu = u; minuthetaid = j; minuphiid = i; } if (v > maxv) { maxv = v; maxvthetaid = j; maxvphiid = i; } if (v < minv) { minv = v; minvthetaid = j; minvphiid = i; } // std::cout << "theta " << j << " phi " << i << " u " << u << " v " << v << std::endl; images[0][j*nPhi+i] = float(u/2+0.5); // R images[1][j*nPhi+i] = float(v/2+0.5); // G images[2][j*nPhi+i] = float(0.5); // B } # ifdef WRITE_TXT ofu << std::endl; ofv << std::endl; # endif } std::cout << "max u = " << maxu << " theta " << maxuthetaid << " phi " << maxuphiid << std::endl; std::cout << "min u = " << minu << " theta " << minuthetaid << " phi " << minuphiid << std::endl; std::cout << "max v = " << maxv << " theta " << maxvthetaid << " phi " << maxvphiid << std::endl; std::cout << "min v = " << minv << " theta " << minvthetaid << " phi " << minvphiid << std::endl; write_image(img_string, nPhi, nTheta, images); } void Solver::write_concentration_image(const std::string& s, const int frame) { std::string img_string = std::to_string(frame); while (img_string.length() < 4) { img_string.insert(0, "0"); } img_string.insert(0, s + "con"); # ifdef WRITE_TXT std::string mat_string = img_string; mat_string.append(".txt"); std::ofstream of(mat_string); # endif img_string.append(".exr"); concentration->copyBackToCPU(); std::vector<float> images[3]; images[0].resize(nPhi * nTheta); images[1].resize(nPhi * nTheta); images[2].resize(nPhi * nTheta); for (size_t j = 0; j < nTheta; ++j) { for (size_t i = 0; i < nPhi; ++i) { fReal con = concentration->getCPUValueAt(i, j); images[0][j*nPhi+i] = (con - 0.9) / 0.2; images[1][j*nPhi+i] = 
(con - 0.9) / 0.2; images[2][j*nPhi+i] = (con - 0.9) / 0.2; # ifdef WRITE_TXT of << con << " "; # endif } # ifdef WRITE_TXT of << std::endl; # endif } write_image(img_string, nPhi, nTheta, images); } void Solver::write_thickness_img(const std::string& s, const int frame) { std::string img_string = std::to_string(frame); while (img_string.length() < 4) { img_string.insert(0, "0"); } img_string.insert(0, s + "frame"); # ifdef WRITE_TXT std::string mat_string = img_string; mat_string.append(".txt"); std::ofstream of(mat_string); # endif std::ofstream thick; thick.open("thickness1024_vdw.txt", std::ofstream::out | std::ofstream::app); img_string.append(".exr"); thickness->copyBackToCPU(); std::vector<float> images[3]; images[0].resize(nPhi * nTheta); images[1].resize(nPhi * nTheta); images[2].resize(nPhi * nTheta); fReal minE = 1.0; fReal ratio = 2e5; // * 10 this->setBroken(true); for (size_t j = 0; j < nTheta; ++j) { for (size_t i = 0; i < nPhi; ++i) { fReal Delta = thickness->getCPUValueAt(i, j); if (Delta > 0) { this->setBroken(false); } // return; //} else { if (Delta < minE) minE = Delta; images[0][j*nPhi+i] = Delta * this->H * ratio; images[1][j*nPhi+i] = Delta * this->H * ratio; images[2][j*nPhi+i] = Delta * this->H * ratio; # ifdef WRITE_TXT of << Delta * this->H * 2 << " "; # endif //} } # ifdef WRITE_TXT of << std::endl; # endif } write_image(img_string, nPhi, nTheta, images); std::cout << "min thickness " << minE << std::endl; } fReal Solver::getGridLen() { return this->gridLen; }
c47825dd2d3ace602e463337e9aa42fb471179e3.cu
# include "../include/solver.cuh" # include "../opencv_headers/opencv2/opencv.hpp" # include "../include/timer.cuh" # include "../include/tinyexr.h" const int fftRank = 1; static const int m = 3; static __constant__ size_t nPhiGlobal; static __constant__ size_t nThetaGlobal; __global__ void initMapping(fReal* map_theta, fReal* map_phi){ // Index int splitVal = (nPhiGlobal + blockDim.x - 1) / blockDim.x; int threadSequence = blockIdx.x % splitVal; int phiId = threadIdx.x + threadSequence * blockDim.x; int thetaId = blockIdx.x / splitVal; if (phiId >= nPhiGlobal) return; map_theta[thetaId * nPhiGlobal + phiId] = (fReal)thetaId + centeredThetaOffset; map_phi[thetaId * nPhiGlobal + phiId] = (fReal)phiId + centeredPhiOffset; } __global__ void initLinearSystem(int* row_ptr, int* col_ind) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > nPhiGlobal * nThetaGlobal) return; int idx5 = 5 * idx; row_ptr[idx] = idx5; if (idx < nPhiGlobal * nThetaGlobal) { // up if (idx < nPhiGlobal) { // first row col_ind[idx5] = (idx + nThetaGlobal) % nPhiGlobal; } else { col_ind[idx5] = idx - nPhiGlobal; } // left col_ind[idx5 + 1] = (idx % nPhiGlobal) == 0 ? idx + nPhiGlobal - 1 : idx - 1; // center col_ind[idx5 + 2] = idx; // right col_ind[idx5 + 3] = (idx % nPhiGlobal) == (nPhiGlobal - 1) ? idx - nPhiGlobal + 1 : idx + 1; // down if (idx >= (nThetaGlobal - 1) * nPhiGlobal + nThetaGlobal) { // last half of the last row col_ind[idx5 + 4] = idx - nThetaGlobal; } else if (idx >= (nThetaGlobal - 1) * nPhiGlobal) { // first half of the last row col_ind[idx5 + 4] = idx + nThetaGlobal; } else { col_ind[idx5 + 4] = idx + nPhiGlobal; } } } Solver::Solver(size_t nPhi, size_t nTheta, fReal radius, fReal dt, fReal H, int device, std::string AMGconfig) : nPhi(nPhi), nTheta(nTheta), radius(radius), invRadius(1.0/radius), gridLen(M_2PI / nPhi), invGridLen(1.0 / gridLen), timeStep(dt), timeElapsed(0.0), advectionTime(0.0), bodyforceTime(0.0), CGTime(0.0), H(H), epsilon(H/radius), N(nPhi*nTheta), nz(5*N), device(device) { /// FIXME: Should we detect and use device 0? /// Replace it later with functions from helper_cuda.h! 
checkCudaErrors(cudaSetDevice(device)); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, device)); // otherwise no enough resource this->nThreadxMax = std::min(deviceProp.maxThreadsDim[0], 256); checkCudaErrors(cudaMalloc(&div, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&uair, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&vair, (N - nPhi) * sizeof(fReal))); checkCudaErrors(cudaMalloc(&uair_init, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&vair_init, (N - nPhi) * sizeof(fReal))); checkCudaErrors(cudaMalloc(&fu, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&fv, (N - nPhi) * sizeof(fReal))); checkCudaErrors(cudaMalloc(&row_ptr, (N + 1) * sizeof(int))); checkCudaErrors(cudaMalloc(&col_ind, nz * sizeof(int))); checkCudaErrors(cudaMalloc(&rhs, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&val, nz * sizeof(fReal))); checkCudaErrors(cudaMalloc(&d_x, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&d_r, N * sizeof(fReal))); checkCudaErrors(cudaMemcpyToSymbol(nPhiGlobal, &(this->nPhi), sizeof(size_t))); checkCudaErrors(cudaMemcpyToSymbol(nThetaGlobal, &(this->nTheta), sizeof(size_t))); this->velPhi = new BimocqQuantity("velPhi", nPhi, nTheta, vPhiPhiOffset, vPhiThetaOffset); this->velTheta = new BimocqQuantity("velTheta", nPhi, nTheta - 1, vThetaPhiOffset, vThetaThetaOffset); this->thickness = new BimocqQuantity("eta", nPhi, nTheta, centeredPhiOffset, centeredThetaOffset); this->concentration = new BimocqQuantity("gamma", nPhi, nTheta, centeredPhiOffset, centeredThetaOffset); this->pitch = concentration->getThisStepPitchInElements(); initWithConst(this->velPhi, 0.0); initWithConst(this->velTheta, 0.0); // initialize_velocity(); initWithConst(this->thickness, 0.5); initWithConst(this->concentration, 1.0); dim3 gridLayout; dim3 blockLayout; determineLayout(gridLayout, blockLayout, 1, N + 1); initLinearSystem<<<gridLayout, blockLayout>>>(row_ptr, col_ind); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); /* AMGX */ int devices[] = {device}; const char *AMGconfigFile = AMGconfig.c_str(); AMGX_initialize(); AMGX_initialize_plugins(); AMGX_config_create_from_file(&cfg, AMGconfigFile); AMGX_resources_create(&res, cfg, NULL, 1, devices); # ifdef USEFLOAT mode = AMGX_mode_dFFI; # else mode = AMGX_mode_dDDI; # endif AMGX_matrix_create(&A, res, mode); AMGX_vector_create(&b, res, mode); AMGX_vector_create(&x, res, mode); AMGX_solver_create(&solver, res, mode, cfg); /* Bimocq mapping buffers */ checkCudaErrors(cudaMalloc(&forward_p, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&forward_t, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&backward_p, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&backward_t, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&backward_pprev, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&backward_tprev, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&tmp_p, N * sizeof(fReal))); checkCudaErrors(cudaMalloc(&tmp_t, N * sizeof(fReal))); determineLayout(gridLayout, blockLayout, nTheta, nPhi); initMapping<<<gridLayout, blockLayout>>>(forward_t, forward_p); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(backward_p, forward_p, N * sizeof(fReal), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(backward_pprev, forward_p, N * sizeof(fReal), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(backward_t, forward_t, N * sizeof(fReal), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(backward_tprev, forward_t, N * sizeof(fReal), 
cudaMemcpyDeviceToDevice)); } Solver::~Solver() { checkCudaErrors(cudaFree(div)); checkCudaErrors(cudaFree(uair)); checkCudaErrors(cudaFree(vair)); checkCudaErrors(cudaFree(uair_init)); checkCudaErrors(cudaFree(vair_init)); checkCudaErrors(cudaFree(fu)); checkCudaErrors(cudaFree(fv)); checkCudaErrors(cudaFree(row_ptr)); checkCudaErrors(cudaFree(col_ind)); checkCudaErrors(cudaFree(rhs)); checkCudaErrors(cudaFree(val)); checkCudaErrors(cudaFree(d_x)); checkCudaErrors(cudaFree(d_r)); delete this->velPhi; delete this->velTheta; delete this->thickness; delete this->concentration; /* AMGX */ AMGX_solver_destroy(solver); AMGX_vector_destroy(b); AMGX_vector_destroy(x); AMGX_matrix_destroy(A); AMGX_resources_destroy(res); AMGX_config_destroy(cfg); AMGX_finalize_plugins(); AMGX_finalize(); /* Bimocq mapping buffers */ checkCudaErrors(cudaFree(forward_p)); checkCudaErrors(cudaFree(forward_t)); checkCudaErrors(cudaFree(backward_p)); checkCudaErrors(cudaFree(backward_t)); checkCudaErrors(cudaFree(backward_pprev)); checkCudaErrors(cudaFree(backward_tprev)); checkCudaErrors(cudaFree(tmp_p)); checkCudaErrors(cudaFree(tmp_t)); checkCudaErrors(cudaDeviceReset()); # ifdef PERFORMANCE_BENCHMARK fReal totalTimeUsed = this->advectionTime + this->bodyforceTime; std::cout << "Total time used for advection : " << this->advectionTime << std::endl; std::cout << "Total time used for body force : " << this->bodyforceTime << std::endl; std::cout << "Total time used for CG : " << CGTime << std::endl; std::cout << "Percentage of advection : " << advectionTime / totalTimeUsed * 100.0f << "%" << std::endl; std::cout << "Percentage of bodyforce : " << bodyforceTime / totalTimeUsed * 100.0f << "%" << std::endl; std::cout << "Percentage of CG / bodyforce : " << CGTime / bodyforceTime * 100.0f << "%" << std::endl; std::cout << "Elapsed time " << this->timeElapsed << std::endl; # endif } void Solver::copyVelocity2GPU() { velPhi->copyToGPU(); velTheta->copyToGPU(); } void Solver::determineLayout(dim3& gridLayout, dim3& blockLayout, size_t nTheta_row, size_t nPhi_col) { if (nPhi_col <= this->nThreadxMax) { gridLayout = dim3(nTheta_row); blockLayout = dim3(nPhi_col); } else { int splitVal = (nPhi_col + nThreadxMax - 1) / nThreadxMax; gridLayout = dim3(nTheta_row * splitVal); blockLayout = dim3(nThreadxMax); } } void Solver::copyToCPU(Quantity* quantity, fReal* cpubuffer) { checkCudaErrors(cudaMemcpy2D((void*)cpubuffer, quantity->getNPhi() * sizeof(fReal), (void*)quantity->getGPUThisStep(), quantity->getThisStepPitchInElements() * sizeof(fReal), quantity->getNPhi() * sizeof(fReal), quantity->getNTheta(), cudaMemcpyDeviceToHost)); } void Solver::stepForward(fReal dt) { fReal dt_ = this->timeStep; this->timeStep = dt; stepForward(); this->timeStep = dt_; } void Solver::stepForward() { # ifdef PERFORMANCE_BENCHMARK Timer timer; timer.startTimer(); # endif # ifdef BIMOCQ // updateCFL(); updateForward(this->timeStep, forward_t, forward_p); updateBackward(this->timeStep, backward_t, backward_p); # endif advection(); # ifdef PERFORMANCE_BENCHMARK this->advectionTime += timer.stopTimer() * 0.001f; timer.startTimer(); # endif bodyforce(); # ifdef PERFORMANCE_BENCHMARK this->bodyforceTime += timer.stopTimer() * 0.001f; # endif # ifdef BIMOCQ fReal distortion = estimateDistortion(); std::cout << "max distortion " << distortion << std::endl; // can be adjusted empirically if (distortion > 4.f) { reInitializeMapping(); std::cout << "mapping reinitialized" << std::endl; } # endif this->timeElapsed += this->timeStep; count++; } void 
Solver::swapVelocityBuffers() { this->velPhi->swapGPUBuffer(); this->velTheta->swapGPUBuffer(); } void Solver::copyVelocityBack2CPU() { this->velPhi->copyBackToCPU(); this->velTheta->copyBackToCPU(); // check if film is broken setBroken(true); for (size_t i = 0; i < this->velPhi->getNPhi(); ++i) { for (size_t j = 0; j < this->velPhi->getNTheta(); ++j) { fReal val = this->velPhi->getCPUValueAt(i, j); if (!isnan(val)) { setBroken(false); goto finish; } } } for (size_t i = 0; i < this->velTheta->getNPhi(); ++i) { for (size_t j = 0; j < this->velTheta->getNTheta(); ++j) { fReal val = this->velTheta->getCPUValueAt(i, j); if (!isnan(val)) { setBroken(false); goto finish; } } } finish: } bool Solver::isBroken() { return this->broken; } void Solver::setBroken(bool broken) { this->broken = broken; } void Solver::write_image(const std::string& s, size_t width, size_t height, std::vector<float> *images) { const char *filename = s.c_str(); EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = 3; float* image_ptr[3]; image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R image.images = (unsigned char**)image_ptr; image.width = width; // image.height = nTheta; image.height = height; header.num_channels = 3; header.channels = (EXRChannelInfo *)malloc(sizeof(EXRChannelInfo) * header.num_channels); // Must be (A)BGR order, since most of EXR viewers expect this channel order. strncpy(header.channels[0].name, "B", 255); header.channels[0].name[strlen("B")] = '\0'; strncpy(header.channels[1].name, "G", 255); header.channels[1].name[strlen("G")] = '\0'; strncpy(header.channels[2].name, "R", 255); header.channels[2].name[strlen("R")] = '\0'; header.pixel_types = (int *)malloc(sizeof(int) * header.num_channels); header.requested_pixel_types = (int *)malloc(sizeof(int) * header.num_channels); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // pixel type of output image to be stored in .EXR } const char* err = NULL; // or nullptr in C++11 or later. int ret = SaveEXRImageToFile(&image, &header, filename, &err); if (ret != TINYEXR_SUCCESS) { fprintf(stderr, "Save EXR err: %s\n", err); FreeEXRErrorMessage(err); // free's buffer for an error message return; } printf("Saved exr file. 
[ %s ] \n", filename); free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); } void Solver::write_velocity_image(const std::string& s, const int frame) { std::string img_string = std::to_string(frame); while (img_string.length() < 4) { img_string.insert(0, "0"); } img_string.insert(0, s + "vel"); # ifdef WRITE_TXT std::string u_string = img_string; std::string v_string = img_string; u_string.append("u.txt"); v_string.append("v.txt"); std::ofstream ofu(u_string); std::ofstream ofv(v_string); # endif img_string.append(".exr"); copyVelocityBack2CPU(); std::vector<float> images[3]; images[0].resize(nPhi * nTheta); images[1].resize(nPhi * nTheta); images[2].resize(nPhi * nTheta); fReal maxu = std::numeric_limits<fReal>::min(); fReal maxv = std::numeric_limits<fReal>::min(); fReal minu = std::numeric_limits<fReal>::max(); fReal minv = std::numeric_limits<fReal>::max(); size_t maxuthetaid = 0; size_t maxuphiid = 0; size_t minuthetaid = 0; size_t minuphiid = 0; size_t maxvthetaid = 0; size_t maxvphiid = 0; size_t minvthetaid = 0; size_t minvphiid = 0; for (size_t j = 0; j < nTheta; ++j) { for (size_t i = 0; i < nPhi; ++i) { fReal uW = velPhi->getCPUValueAt(i, j); fReal uE; fReal vN; fReal vS; if (i != nPhi - 1) { uE = velPhi->getCPUValueAt(i + 1, j); } else { uE = velPhi->getCPUValueAt(0, j); } if (j != 0) { vN = velTheta->getCPUValueAt(i, j - 1); } else { size_t oppositei = (i + nPhi/2) % nPhi; vN = 0.75 * velTheta->getCPUValueAt(i, j) - 0.25 * velTheta->getCPUValueAt(oppositei, j); } if (j != nTheta - 1) { vS = velTheta->getCPUValueAt(i, j); // ofv << vS << " "; } else { size_t oppositei = (i + nPhi/2) % nPhi; vS = 0.75 * velTheta->getCPUValueAt(i, j - 1) - 0.25 * velTheta->getCPUValueAt(oppositei, j - 1); } fReal u = 0.5 * (uW + uE); fReal v = 0.5 * (vN + vS); # ifdef WRITE_TXT ofu << u << " "; ofv << v << " "; # endif if (u > maxu) { maxu = u; maxuthetaid = j; maxuphiid = i; } if (u < minu) { minu = u; minuthetaid = j; minuphiid = i; } if (v > maxv) { maxv = v; maxvthetaid = j; maxvphiid = i; } if (v < minv) { minv = v; minvthetaid = j; minvphiid = i; } // std::cout << "theta " << j << " phi " << i << " u " << u << " v " << v << std::endl; images[0][j*nPhi+i] = float(u/2+0.5); // R images[1][j*nPhi+i] = float(v/2+0.5); // G images[2][j*nPhi+i] = float(0.5); // B } # ifdef WRITE_TXT ofu << std::endl; ofv << std::endl; # endif } std::cout << "max u = " << maxu << " theta " << maxuthetaid << " phi " << maxuphiid << std::endl; std::cout << "min u = " << minu << " theta " << minuthetaid << " phi " << minuphiid << std::endl; std::cout << "max v = " << maxv << " theta " << maxvthetaid << " phi " << maxvphiid << std::endl; std::cout << "min v = " << minv << " theta " << minvthetaid << " phi " << minvphiid << std::endl; write_image(img_string, nPhi, nTheta, images); } void Solver::write_concentration_image(const std::string& s, const int frame) { std::string img_string = std::to_string(frame); while (img_string.length() < 4) { img_string.insert(0, "0"); } img_string.insert(0, s + "con"); # ifdef WRITE_TXT std::string mat_string = img_string; mat_string.append(".txt"); std::ofstream of(mat_string); # endif img_string.append(".exr"); concentration->copyBackToCPU(); std::vector<float> images[3]; images[0].resize(nPhi * nTheta); images[1].resize(nPhi * nTheta); images[2].resize(nPhi * nTheta); for (size_t j = 0; j < nTheta; ++j) { for (size_t i = 0; i < nPhi; ++i) { fReal con = concentration->getCPUValueAt(i, j); images[0][j*nPhi+i] = (con - 0.9) / 0.2; images[1][j*nPhi+i] = 
(con - 0.9) / 0.2; images[2][j*nPhi+i] = (con - 0.9) / 0.2; # ifdef WRITE_TXT of << con << " "; # endif } # ifdef WRITE_TXT of << std::endl; # endif } write_image(img_string, nPhi, nTheta, images); } void Solver::write_thickness_img(const std::string& s, const int frame) { std::string img_string = std::to_string(frame); while (img_string.length() < 4) { img_string.insert(0, "0"); } img_string.insert(0, s + "frame"); # ifdef WRITE_TXT std::string mat_string = img_string; mat_string.append(".txt"); std::ofstream of(mat_string); # endif std::ofstream thick; thick.open("thickness1024_vdw.txt", std::ofstream::out | std::ofstream::app); img_string.append(".exr"); thickness->copyBackToCPU(); std::vector<float> images[3]; images[0].resize(nPhi * nTheta); images[1].resize(nPhi * nTheta); images[2].resize(nPhi * nTheta); fReal minE = 1.0; fReal ratio = 2e5; // * 10 this->setBroken(true); for (size_t j = 0; j < nTheta; ++j) { for (size_t i = 0; i < nPhi; ++i) { fReal Delta = thickness->getCPUValueAt(i, j); if (Delta > 0) { this->setBroken(false); } // return; //} else { if (Delta < minE) minE = Delta; images[0][j*nPhi+i] = Delta * this->H * ratio; images[1][j*nPhi+i] = Delta * this->H * ratio; images[2][j*nPhi+i] = Delta * this->H * ratio; # ifdef WRITE_TXT of << Delta * this->H * 2 << " "; # endif //} } # ifdef WRITE_TXT of << std::endl; # endif } write_image(img_string, nPhi, nTheta, images); std::cout << "min thickness " << minE << std::endl; } fReal Solver::getGridLen() { return this->gridLen; }
d80d206fe7f2d0d80d0d6982f077c68810eb401b.hip
// !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
//#include <unistd.h>
#include <hip/hip_runtime.h>
//#include <hipfft.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "param.h"
#include "particle.h"

#define TWO_PI 6.283185307f

__global__ void G_update_position(particle *G_boid, float *G_Fx, float *G_Fy ,float *G_sum_Vx, float *G_sum_Vy, hiprandState_t *state, unsigned int * G_random_number)
{
    // Much better without shared memory
    int i=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    // float x,y;
    float module_displace;
    float component_x,component_y;
    float angle;

    if (i<N)
    {
        G_random_number[i] = hiprand(state) % RAND_MAX;
        angle = TWO_PI * (float) G_random_number[i] /RAND_MAX;

        component_x = ( G_sum_Vx[i] + G_Fx[i] + ETA * cos(angle));
        component_y = ( G_sum_Vy[i] + G_Fy[i] + ETA * sin(angle));
        module_displace = sqrt((component_x*component_x)+(component_y*component_y));

        G_boid[i].x = G_boid[i].x + G_boid[i].v0 * component_x/module_displace;
        G_boid[i].y = G_boid[i].y + G_boid[i].v0 * component_y/module_displace;

        //boundary conditions
        if(G_boid[i].x>=L_TENTATIVA)G_boid[i].x=G_boid[i].x-L_TENTATIVA;
        if(G_boid[i].y>=L_TENTATIVA)G_boid[i].y=G_boid[i].y-L_TENTATIVA;
        if(G_boid[i].x<0)G_boid[i].x=G_boid[i].x+L_TENTATIVA;
        if(G_boid[i].y<0)G_boid[i].y=G_boid[i].y+L_TENTATIVA;

        G_boid[i].vx = G_boid[i].v0 * component_x/module_displace;
        G_boid[i].vy = G_boid[i].v0 * component_y/module_displace;
    }
};

void update_position(particle *boid, float *fx, float *fy, float *sum_vx, float *sum_vy)
{
    float angle;
    int i;
    float module_displace;
    float componente_x,componente_y;

    for(i=0;i<N;i++)
    {
        angle = drand48() * TWO_PI;

        componente_x = ( sum_vx[i] + fx[i] + ETA * cos(angle));
        componente_y = ( sum_vy[i] + fy[i] + ETA * sin(angle));
        module_displace = sqrt((componente_x*componente_x)+(componente_y*componente_y));

        boid[i].x = boid[i].x + boid[i].v0 * componente_x/module_displace;
        boid[i].y = boid[i].y + boid[i].v0 * componente_y/module_displace;

        // boundary conditions
        if(boid[i].x>=L_TENTATIVA)boid[i].x=boid[i].x-L_TENTATIVA;
        if(boid[i].y>=L_TENTATIVA)boid[i].y=boid[i].y-L_TENTATIVA;
        if(boid[i].x<0)boid[i].x=boid[i].x+L_TENTATIVA;
        if(boid[i].y<0)boid[i].y=boid[i].y+L_TENTATIVA;

        boid[i].vx = boid[i].v0 * componente_x/module_displace;
        boid[i].vy = boid[i].v0 * componente_y/module_displace;
    }
}
d80d206fe7f2d0d80d0d6982f077c68810eb401b.cu
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
//#include <unistd.h>
#include <cuda.h>
//#include <cufft.h>
#include <curand.h>
#include <curand_kernel.h>
#include "param.h"
#include "particle.h"

#define TWO_PI 6.283185307f

__global__ void G_update_position(particle *G_boid, float *G_Fx, float *G_Fy ,float *G_sum_Vx, float *G_sum_Vy, curandState_t *state, unsigned int * G_random_number)
{
    // Much better without shared memory
    int i=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    // float x,y;
    float module_displace;
    float component_x,component_y;
    float angle;

    if (i<N)
    {
        G_random_number[i] = curand(state) % RAND_MAX;
        angle = TWO_PI * (float) G_random_number[i] /RAND_MAX;

        component_x = ( G_sum_Vx[i] + G_Fx[i] + ETA * cos(angle));
        component_y = ( G_sum_Vy[i] + G_Fy[i] + ETA * sin(angle));
        module_displace = sqrt((component_x*component_x)+(component_y*component_y));

        G_boid[i].x = G_boid[i].x + G_boid[i].v0 * component_x/module_displace;
        G_boid[i].y = G_boid[i].y + G_boid[i].v0 * component_y/module_displace;

        //boundary conditions
        if(G_boid[i].x>=L_TENTATIVA)G_boid[i].x=G_boid[i].x-L_TENTATIVA;
        if(G_boid[i].y>=L_TENTATIVA)G_boid[i].y=G_boid[i].y-L_TENTATIVA;
        if(G_boid[i].x<0)G_boid[i].x=G_boid[i].x+L_TENTATIVA;
        if(G_boid[i].y<0)G_boid[i].y=G_boid[i].y+L_TENTATIVA;

        G_boid[i].vx = G_boid[i].v0 * component_x/module_displace;
        G_boid[i].vy = G_boid[i].v0 * component_y/module_displace;
    }
};

void update_position(particle *boid, float *fx, float *fy, float *sum_vx, float *sum_vy)
{
    float angle;
    int i;
    float module_displace;
    float componente_x,componente_y;

    for(i=0;i<N;i++)
    {
        angle = drand48() * TWO_PI;

        componente_x = ( sum_vx[i] + fx[i] + ETA * cos(angle));
        componente_y = ( sum_vy[i] + fy[i] + ETA * sin(angle));
        module_displace = sqrt((componente_x*componente_x)+(componente_y*componente_y));

        boid[i].x = boid[i].x + boid[i].v0 * componente_x/module_displace;
        boid[i].y = boid[i].y + boid[i].v0 * componente_y/module_displace;

        // boundary conditions
        if(boid[i].x>=L_TENTATIVA)boid[i].x=boid[i].x-L_TENTATIVA;
        if(boid[i].y>=L_TENTATIVA)boid[i].y=boid[i].y-L_TENTATIVA;
        if(boid[i].x<0)boid[i].x=boid[i].x+L_TENTATIVA;
        if(boid[i].y<0)boid[i].y=boid[i].y+L_TENTATIVA;

        boid[i].vx = boid[i].v0 * componente_x/module_displace;
        boid[i].vy = boid[i].v0 * componente_y/module_displace;
    }
}
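Neither file above shows how G_update_position is launched or how its curandState_t is seeded. The sketch below is one hedged possibility, written with the CUDA spellings of the .cu version; the init_rng kernel, the 1234 seed, the 256-thread block, and the launch_update wrapper are all assumptions (the single shared state simply mirrors the fact that every thread calls curand(state) on the same pointer), and N, ETA, L_TENTATIVA and particle are taken to come from param.h/particle.h as in the files above.

#include <cuda.h>
#include <curand_kernel.h>
#include "param.h"
#include "particle.h"

// Hypothetical RNG setup: one shared state, matching how G_update_position dereferences `state`.
__global__ void init_rng(curandState_t *state, unsigned long long seed)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) curand_init(seed, 0, 0, state);
}

// Hypothetical launch wrapper; block size and seed are illustrative choices, not from the original code.
void launch_update(particle *d_boid, float *d_fx, float *d_fy, float *d_sum_vx, float *d_sum_vy)
{
    curandState_t *d_state = NULL;
    unsigned int *d_random = NULL;
    cudaMalloc(&d_state, sizeof(curandState_t));
    cudaMalloc(&d_random, N * sizeof(unsigned int));

    init_rng<<<1, 1>>>(d_state, 1234ULL);                 // assumed seed

    int block = 256;                                      // assumed block size
    int grid = (N + block - 1) / block;
    G_update_position<<<grid, block>>>(d_boid, d_fx, d_fy, d_sum_vx, d_sum_vy, d_state, d_random);
    cudaDeviceSynchronize();

    cudaFree(d_state);
    cudaFree(d_random);
}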
2e4ee4ce91bf865b5ceeef8207fc1947a448940b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///////////////////////// // freqAnalyzer.cu // // Andrew Krepps // // Module 8 Assignment // // 4/2/2018 // ///////////////////////// #include <chrono> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <hipfft.h> /////////////////////////////////////////////////////////////////////////////// /// \brief find the magnitude of each frequency bin from FFT results /// /// \param [in] in the complex frequency-domain signal /// \param [out] out the magnitude of each frequency bin /// \param [in] n the number of samples /////////////////////////////////////////////////////////////////////////////// __global__ void calcMagnitudes(const hipfftComplex* in, float* out, const unsigned int n) { const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < n) { out[idx] = cuCabsf(in[idx]); } } /////////////////////////////////////////////////////////////////////////////// /// \brief run an FFT on a real signal and find the magnitude of each frequency /// bin /// /// \param [in] timeSignal the real time-domain signal /// \param [out] freqSignalMagnitudes the magnitude of each frequency bin /// \param [in] n the number of samples /// \param [in] blockSize the number of threads per block /// /// \returns total exection time (in ms) including data transfer /////////////////////////////////////////////////////////////////////////////// float runFFT(const float* timeSignal, float* freqSignalMagnitudes, const unsigned int n, const unsigned int blockSize) { // start clock auto start = std::chrono::high_resolution_clock::now(); // create FFT plan hipfftHandle plan; hipfftPlan1d(&plan, n, HIPFFT_R2C, 1); // allocate device memory hipfftReal* d_timeSignal; hipfftComplex* d_freqSignal; float* d_freqSignalMagnitudes; const unsigned int realBytes = n*sizeof(hipfftReal); const unsigned int complexBytes = n*sizeof(hipfftComplex); hipMalloc((void**)&d_timeSignal, realBytes); hipMalloc((void**)&d_freqSignal, complexBytes); hipMalloc((void**)&d_freqSignalMagnitudes, realBytes); // copy input data to device hipMemcpy(d_timeSignal, timeSignal, realBytes, hipMemcpyHostToDevice); // perform FFT hipfftExecR2C(plan, d_timeSignal, d_freqSignal); hipDeviceSynchronize(); // find magnitudes of frequency signal const unsigned int numBlocks = n/blockSize; hipLaunchKernelGGL(( calcMagnitudes), dim3(numBlocks), dim3(blockSize), 0, 0, d_freqSignal, d_freqSignalMagnitudes, n); // copy output data to host hipMemcpy(freqSignalMagnitudes, d_freqSignalMagnitudes, realBytes, hipMemcpyDeviceToHost); // free device memory hipFree(d_timeSignal); hipFree(d_freqSignal); hipFree(d_freqSignalMagnitudes); // calculate execution time in ms auto stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<float> duration(stop - start); return duration.count()*1000.0f; } /////////////////////////////////////////////////////////////////////////////// /// \brief initialize input data on the host /// /// \param [out] timeSignal the time-domain input signal /// \param [in] frequency the signal frequency (in Hz) /// \param [in] samplingRate the signal sampling rate (in Hz) /// \param [in] n the number of samples /////////////////////////////////////////////////////////////////////////////// void initializeInputData(float* timeSignal, float frequency, float samplingRate, const unsigned int n) { printf("Generating %f Hz signal\n", frequency); for (unsigned int i = 0; i < n; ++i) { timeSignal[i] = sin(2.0f*M_PI*frequency*i/samplingRate); 
} } /////////////////////////////////////////////////////////////////////////////// /// \brief extract strongest frequency bin from FFT results /// /// \param [in] freqSignalMagnitudes the magnitude of each frequency bin /// \param [in] samplingRate the signal sampling rate (in Hz) /// \param [in] n the number of samples /// /// \returns the frequency (in Hz) of the bin with the largest magnitude /////////////////////////////////////////////////////////////////////////////// float extractFrequency(const float* freqSignalMagnitudes, float samplingRate, const unsigned int n) { // find frequency bin with highest magnitude unsigned int maxIdx = 0; float maxVal = 0.0f; // since input was a real signal, we only need to consider the first half of the output for (unsigned int i = 0; i < n/2; ++i) { if (freqSignalMagnitudes[i] > maxVal) { maxVal = freqSignalMagnitudes[i]; maxIdx = i; } } // calculate frequency of selected bin return maxIdx*samplingRate/n; } int main(int argc, char** argv) { // configure run unsigned int dataSize = 512; unsigned int blockSize = 256; float frequency = 128.0f; float samplingRate = 512.0f; if (argc > 1) { dataSize = atoi(argv[1]); } if (argc > 2) { blockSize = atoi(argv[2]); } if (argc > 3) { frequency = atof(argv[3]); } if (argc > 4) { samplingRate = atof(argv[4]); } // allocate and initialize host memory const unsigned int numBytes = dataSize*sizeof(float); float* timeSignal = (float*)malloc(numBytes); float* freqSignalMagnitudes = (float*)malloc(numBytes); initializeInputData(timeSignal, frequency, samplingRate, dataSize); // dummy execution to avoid startup cost runFFT(timeSignal, freqSignalMagnitudes, dataSize, blockSize); // extract frequency from signal float ms = runFFT(timeSignal, freqSignalMagnitudes, dataSize, blockSize); float foundFrequency = extractFrequency(freqSignalMagnitudes, samplingRate, dataSize); // show results printf("Strongest frequency bin was %f Hz (execution time = %.3f ms)\n", foundFrequency, ms); // free host memory free(timeSignal); free(freqSignalMagnitudes); }
2e4ee4ce91bf865b5ceeef8207fc1947a448940b.cu
///////////////////////// // freqAnalyzer.cu // // Andrew Krepps // // Module 8 Assignment // // 4/2/2018 // ///////////////////////// #include <chrono> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <cufft.h> /////////////////////////////////////////////////////////////////////////////// /// \brief find the magnitude of each frequency bin from FFT results /// /// \param [in] in the complex frequency-domain signal /// \param [out] out the magnitude of each frequency bin /// \param [in] n the number of samples /////////////////////////////////////////////////////////////////////////////// __global__ void calcMagnitudes(const cufftComplex* in, float* out, const unsigned int n) { const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < n) { out[idx] = cuCabsf(in[idx]); } } /////////////////////////////////////////////////////////////////////////////// /// \brief run an FFT on a real signal and find the magnitude of each frequency /// bin /// /// \param [in] timeSignal the real time-domain signal /// \param [out] freqSignalMagnitudes the magnitude of each frequency bin /// \param [in] n the number of samples /// \param [in] blockSize the number of threads per block /// /// \returns total exection time (in ms) including data transfer /////////////////////////////////////////////////////////////////////////////// float runFFT(const float* timeSignal, float* freqSignalMagnitudes, const unsigned int n, const unsigned int blockSize) { // start clock auto start = std::chrono::high_resolution_clock::now(); // create FFT plan cufftHandle plan; cufftPlan1d(&plan, n, CUFFT_R2C, 1); // allocate device memory cufftReal* d_timeSignal; cufftComplex* d_freqSignal; float* d_freqSignalMagnitudes; const unsigned int realBytes = n*sizeof(cufftReal); const unsigned int complexBytes = n*sizeof(cufftComplex); cudaMalloc((void**)&d_timeSignal, realBytes); cudaMalloc((void**)&d_freqSignal, complexBytes); cudaMalloc((void**)&d_freqSignalMagnitudes, realBytes); // copy input data to device cudaMemcpy(d_timeSignal, timeSignal, realBytes, cudaMemcpyHostToDevice); // perform FFT cufftExecR2C(plan, d_timeSignal, d_freqSignal); cudaDeviceSynchronize(); // find magnitudes of frequency signal const unsigned int numBlocks = n/blockSize; calcMagnitudes<<<numBlocks, blockSize>>>(d_freqSignal, d_freqSignalMagnitudes, n); // copy output data to host cudaMemcpy(freqSignalMagnitudes, d_freqSignalMagnitudes, realBytes, cudaMemcpyDeviceToHost); // free device memory cudaFree(d_timeSignal); cudaFree(d_freqSignal); cudaFree(d_freqSignalMagnitudes); // calculate execution time in ms auto stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<float> duration(stop - start); return duration.count()*1000.0f; } /////////////////////////////////////////////////////////////////////////////// /// \brief initialize input data on the host /// /// \param [out] timeSignal the time-domain input signal /// \param [in] frequency the signal frequency (in Hz) /// \param [in] samplingRate the signal sampling rate (in Hz) /// \param [in] n the number of samples /////////////////////////////////////////////////////////////////////////////// void initializeInputData(float* timeSignal, float frequency, float samplingRate, const unsigned int n) { printf("Generating %f Hz signal\n", frequency); for (unsigned int i = 0; i < n; ++i) { timeSignal[i] = sin(2.0f*M_PI*frequency*i/samplingRate); } } /////////////////////////////////////////////////////////////////////////////// /// \brief extract strongest frequency 
bin from FFT results /// /// \param [in] freqSignalMagnitudes the magnitude of each frequency bin /// \param [in] samplingRate the signal sampling rate (in Hz) /// \param [in] n the number of samples /// /// \returns the frequency (in Hz) of the bin with the largest magnitude /////////////////////////////////////////////////////////////////////////////// float extractFrequency(const float* freqSignalMagnitudes, float samplingRate, const unsigned int n) { // find frequency bin with highest magnitude unsigned int maxIdx = 0; float maxVal = 0.0f; // since input was a real signal, we only need to consider the first half of the output for (unsigned int i = 0; i < n/2; ++i) { if (freqSignalMagnitudes[i] > maxVal) { maxVal = freqSignalMagnitudes[i]; maxIdx = i; } } // calculate frequency of selected bin return maxIdx*samplingRate/n; } int main(int argc, char** argv) { // configure run unsigned int dataSize = 512; unsigned int blockSize = 256; float frequency = 128.0f; float samplingRate = 512.0f; if (argc > 1) { dataSize = atoi(argv[1]); } if (argc > 2) { blockSize = atoi(argv[2]); } if (argc > 3) { frequency = atof(argv[3]); } if (argc > 4) { samplingRate = atof(argv[4]); } // allocate and initialize host memory const unsigned int numBytes = dataSize*sizeof(float); float* timeSignal = (float*)malloc(numBytes); float* freqSignalMagnitudes = (float*)malloc(numBytes); initializeInputData(timeSignal, frequency, samplingRate, dataSize); // dummy execution to avoid startup cost runFFT(timeSignal, freqSignalMagnitudes, dataSize, blockSize); // extract frequency from signal float ms = runFFT(timeSignal, freqSignalMagnitudes, dataSize, blockSize); float foundFrequency = extractFrequency(freqSignalMagnitudes, samplingRate, dataSize); // show results printf("Strongest frequency bin was %f Hz (execution time = %.3f ms)\n", foundFrequency, ms); // free host memory free(timeSignal); free(freqSignalMagnitudes); }
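The frequency resolution of the analyzer above is samplingRate/n per bin, so with the default 512 samples at 512 Hz each bin spans 1 Hz and a 128 Hz sine lands in bin 128. A small sketch of that arithmetic, useful when picking test frequencies; the helper name and its rounding convention are assumptions, not part of either file:

// Hypothetical helper (not part of the program above): the FFT bin in which a pure tone of
// `frequency` Hz should peak, given `n` samples at `samplingRate` Hz. Bin width is samplingRate/n.
unsigned int expectedBin(float frequency, float samplingRate, unsigned int n)
{
    return (unsigned int)(frequency * n / samplingRate + 0.5f);   // round to the nearest bin
}
// With the defaults above: expectedBin(128.0f, 512.0f, 512) == 128, and extractFrequency()
// then reports 128 * 512 / 512 = 128 Hz.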
d854ff2234b98eb630c7a5553579014df2f64b89.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer/prepare_functions.h"

#include <algorithm>

namespace SparseOperationKit {

__global__ void gen_position_for_indices_kernel(const int64_t* indices, const size_t elem_num,
                                                int64_t* positions) {
  size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < elem_num) {
    positions[gid] = gid;
  }
}

void gen_position_for_indices(const int64_t* indices, const size_t elem_num, int64_t* positions,
                              hipStream_t stream) {
  size_t block_size = 64;
  size_t grid_size = (block_size + elem_num - 1) / block_size;
  hipLaunchKernelGGL(( gen_position_for_indices_kernel), dim3(grid_size), dim3(block_size), 0, stream,
      indices, elem_num, positions);
}

__global__ void gen_unique_flags_for_indices_kernel(const int64_t* indices, const size_t elem_num,
                                                    uint32_t* flags) {
  size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = blockDim.x * gridDim.x;
  for (size_t i = gid; i < elem_num; i += stride) {
    int64_t cur_indice = indices[i];
    if (i > 0) {
      int64_t former_indice = indices[i - 1];
      if (cur_indice != former_indice)
        flags[i] = 1;  // it is an unique indice
      else
        flags[i] = 0;  // it is not an unique indice
    } else {
      flags[i] = 1;  // it is an unique indice
    }
  }
}

void gen_unique_flags_for_indices(const int64_t* indices, const size_t elem_num, uint32_t* flags,
                                  hipStream_t stream) {
  constexpr size_t max_grid_size = 384;
  size_t block_size = 256;
  size_t grid_size = ::min(max_grid_size, (elem_num - 1) / block_size + 1);
  hipLaunchKernelGGL(( gen_unique_flags_for_indices_kernel), dim3(grid_size), dim3(block_size), 0, stream,
      indices, elem_num, flags);
}

__global__ void gen_unique_indexes_for_indices_kernel(const uint32_t* flags,
                                                      const uint32_t* prefix_sums,
                                                      const size_t elem_num, size_t* indexes,
                                                      uint32_t* num_of_uniques) {
  size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = blockDim.x * gridDim.x;
  for (size_t i = gid; i < elem_num; i += stride) {
    uint32_t flag = flags[i];
    if (1 == flag) indexes[prefix_sums[i] - 1] = i;
  }
  if (0 == gid) {
    *num_of_uniques = prefix_sums[elem_num - 1];
    indexes[*num_of_uniques] = elem_num;
  }
}

void gen_unique_indexes_for_indices(const uint32_t* flags, const uint32_t* prefix_sums,
                                    const size_t elem_num, size_t* indexes,
                                    uint32_t* num_of_uniques, hipStream_t stream) {
  constexpr size_t max_grid_size = 384;
  size_t block_size = 256;
  size_t grid_size = ::min(max_grid_size, (elem_num - 1) / block_size + 1);
  hipLaunchKernelGGL(( gen_unique_indexes_for_indices_kernel), dim3(grid_size), dim3(block_size), 0, stream,
      flags, prefix_sums, elem_num, indexes, num_of_uniques);
}

}  // namespace SparseOperationKit
d854ff2234b98eb630c7a5553579014df2f64b89.cu
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer/prepare_functions.h"

#include <algorithm>

namespace SparseOperationKit {

__global__ void gen_position_for_indices_kernel(const int64_t* indices, const size_t elem_num,
                                                int64_t* positions) {
  size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < elem_num) {
    positions[gid] = gid;
  }
}

void gen_position_for_indices(const int64_t* indices, const size_t elem_num, int64_t* positions,
                              cudaStream_t stream) {
  size_t block_size = 64;
  size_t grid_size = (block_size + elem_num - 1) / block_size;
  gen_position_for_indices_kernel<<<grid_size, block_size, 0, stream>>>(
      indices, elem_num, positions);
}

__global__ void gen_unique_flags_for_indices_kernel(const int64_t* indices, const size_t elem_num,
                                                    uint32_t* flags) {
  size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = blockDim.x * gridDim.x;
  for (size_t i = gid; i < elem_num; i += stride) {
    int64_t cur_indice = indices[i];
    if (i > 0) {
      int64_t former_indice = indices[i - 1];
      if (cur_indice != former_indice)
        flags[i] = 1;  // it is an unique indice
      else
        flags[i] = 0;  // it is not an unique indice
    } else {
      flags[i] = 1;  // it is an unique indice
    }
  }
}

void gen_unique_flags_for_indices(const int64_t* indices, const size_t elem_num, uint32_t* flags,
                                  cudaStream_t stream) {
  constexpr size_t max_grid_size = 384;
  size_t block_size = 256;
  size_t grid_size = std::min(max_grid_size, (elem_num - 1) / block_size + 1);
  gen_unique_flags_for_indices_kernel<<<grid_size, block_size, 0, stream>>>(
      indices, elem_num, flags);
}

__global__ void gen_unique_indexes_for_indices_kernel(const uint32_t* flags,
                                                      const uint32_t* prefix_sums,
                                                      const size_t elem_num, size_t* indexes,
                                                      uint32_t* num_of_uniques) {
  size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = blockDim.x * gridDim.x;
  for (size_t i = gid; i < elem_num; i += stride) {
    uint32_t flag = flags[i];
    if (1 == flag) indexes[prefix_sums[i] - 1] = i;
  }
  if (0 == gid) {
    *num_of_uniques = prefix_sums[elem_num - 1];
    indexes[*num_of_uniques] = elem_num;
  }
}

void gen_unique_indexes_for_indices(const uint32_t* flags, const uint32_t* prefix_sums,
                                    const size_t elem_num, size_t* indexes,
                                    uint32_t* num_of_uniques, cudaStream_t stream) {
  constexpr size_t max_grid_size = 384;
  size_t block_size = 256;
  size_t grid_size = std::min(max_grid_size, (elem_num - 1) / block_size + 1);
  gen_unique_indexes_for_indices_kernel<<<grid_size, block_size, 0, stream>>>(
      flags, prefix_sums, elem_num, indexes, num_of_uniques);
}

}  // namespace SparseOperationKit
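The three helpers above (position generation, unique-flag marking, unique-index scatter) assume the caller has already sorted the indices and can supply an inclusive prefix sum of the flags; the files contain only the kernels and their launch wrappers. The driver below is a sketch of one way the pieces might compose, written against the .cu version — the use of cub::DeviceScan, the temporary-buffer handling, and the build_unique_indexes name are assumptions, not something these files prescribe.

#include <cub/cub.cuh>
#include "optimizer/prepare_functions.h"

// Hypothetical driver: d_sorted_indices must already be sorted so equal values are adjacent,
// and d_indexes needs room for (num_uniques + 1) entries because the kernel writes a sentinel.
void build_unique_indexes(const int64_t* d_sorted_indices, size_t elem_num,
                          uint32_t* d_flags, uint32_t* d_prefix_sums,
                          size_t* d_indexes, uint32_t* d_num_uniques,
                          cudaStream_t stream) {
  // 1. mark the first occurrence of every distinct index with a 1
  SparseOperationKit::gen_unique_flags_for_indices(d_sorted_indices, elem_num, d_flags, stream);

  // 2. inclusive prefix sum of the flags: prefix_sums[i] = number of uniques in [0, i]
  void* d_temp = nullptr;
  size_t temp_bytes = 0;
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_flags, d_prefix_sums, elem_num, stream);
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_flags, d_prefix_sums, elem_num, stream);

  // 3. scatter the position of each first occurrence and record the unique count
  SparseOperationKit::gen_unique_indexes_for_indices(d_flags, d_prefix_sums, elem_num,
                                                     d_indexes, d_num_uniques, stream);
  cudaStreamSynchronize(stream);
  cudaFree(d_temp);
}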
4e8f9e10842a4929f41459347cc8aaece3da45c0.hip
// !!! This is a file automatically generated by hipify!!! #include "neural_net.h" void NeuralNet::getComputationTime(void *X, int *y, double learning_rate, std::vector<float> &fwd_computation_time, std::vector<float> &bwd_computation_time) { for (int i = 0; i < num_layers; i++) prefetched[i] = false; // checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); // checkCudaErrors(hipMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(this->y, y, batch_size * data_type_size, hipMemcpyHostToDevice)); float alpha = 1.0, beta = 0.0; float Salpha = 1.0, Sbeta = 0.0; double Dalpha = 1.0, Dbeta = 0.0; // forward propagate for (int i = 0; i < num_layers; i++) { size_t cur_workspace_size; void *cur_workspace; checkCNMEM(cnmemMalloc(&layer_input[i], layer_input_size[i] * data_type_size, NULL)); checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } checkCudaErrors(hipEventRecord(start_compute, stream_compute)); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; // computation checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->filter_desc, cur_params->W, cur_params->conv_desc, cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta, cur_params->output_tensor, layer_input[i + 1])); checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc, cur_params->b, &alpha, cur_params->output_tensor, layer_input[i + 1])); // if activation required if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } } else if (layer_type[i] == FULLY_CONNECTED) { // std::cout << "FC\n"; FCLayerParams *cur_params = (FCLayerParams *)params[i]; // std::cout << "FChere" << i << std::endl; if (data_type == CUDNN_DATA_FLOAT) { checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out, (float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1], cur_params->C_out)); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out, (double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1], cur_params->C_out)); } if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, 
layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->factor, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->output_tensor, layer_input[i + 1])); } else if (layer_type[i] == ACTV) { std::cout << "Panic!! ACTV wrong place\n"; exit(0); ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } else if (layer_type[i] == SOFTMAX) { // std::cout << "Softmax\n"; std::cout << "Panic!! SOFTMAX wrong place\n"; exit(0); SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } // ---------------------- vDNN start ---------------------- // synchronization // checkCudaErrors(hipDeviceSynchronize()); // if next layer is ACTV or SOFTMAX, complete that and come to synchronization // the case in above if for ACTV and SOFTMAX never occurs if (layer_type[i + 1] == SOFTMAX) { i++; layer_input[i + 1] = layer_input[i]; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); i--; } // sync with stream_compute guaranteed checkCudaErrors(hipEventRecord(stop_compute, stream_compute)); checkCudaErrors(hipEventSynchronize(stop_compute)); float compute_time = 0; checkCudaErrors(hipEventElapsedTime(&compute_time, start_compute, stop_compute)); fwd_computation_time.push_back(compute_time); if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); } checkCNMEM(cnmemFree(layer_input[i], NULL)); checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } // ---------------------- vDNN end ------------------------ } // time for loss compute ignored // *scalar_loss = computeLoss(); // time for softmax backward ignored // ---------------------- vDNN start ---------------------- // checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); // space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); // // std::cout << "Free bytes: " << free_bytes << std::endl; // 
// ---------------------- vDNN end ------------------------ // if (layer_type[num_layers - 1] == SOFTMAX) { // // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1]; // if (data_type == CUDNN_DATA_FLOAT) { // checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float))); // softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (float *)layer_input[num_layers], // (float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); // } // else if (data_type == CUDNN_DATA_DOUBLE) { // checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double))); // softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (double *)layer_input[num_layers], // (double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); // } // } for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); checkCNMEM(cnmemMalloc(&layer_input[i], layer_input_size[i] * data_type_size, NULL)); checkCNMEM(cnmemMalloc(&dlayer_input[i + 1], layer_input_size[i] * data_type_size, NULL)); if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); } } // ---------------------- vDNN end ------------------------ if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; // allocate space for derivative if (!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size : cur_data_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } if (!(i + 1 < num_layers && layer_type[i + 1] == SOFTMAX)) checkCudaErrors(hipEventRecord(start_compute, stream_compute)); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size; checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1], &beta, cur_params->bias_desc, cur_params->db)); // std::cout << "neural_net: backward conv i:" << i << std::endl; checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta, cur_params->filter_desc, cur_params->dW)); if (i > 0) checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W, cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace, cur_workspace_size, &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "Free bytes: " << free_bytes << std::endl; // std::cout << "here\n"; cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } if (data_type == CUDNN_DATA_FLOAT) { // bias backward checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)one_vec, batch_size, &Sbeta, (float *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, cur_params->C_in, 
batch_size, cur_params->C_out, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)dlayer_input[i + 1], cur_params->C_out, &Sbeta, (float *)dlayer_input[i], cur_params->C_in)); } else if (data_type == CUDNN_DATA_DOUBLE) { // bias backward checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)one_vec, batch_size, &Dbeta, (double *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)dlayer_input[i + 1], cur_params->C_out, &Dbeta, (double *)dlayer_input[i], cur_params->C_in)); } cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->sbmv_desc, cur_params->scale, cur_params->dscale, cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); } else if (layer_type[i] == ACTV) { ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); continue; } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "compute here\n"; continue; } // ---------------------- vDNN start ---------------------- // checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipEventRecord(stop_compute, stream_compute)); checkCudaErrors(hipEventSynchronize(stop_compute)); float compute_time; checkCudaErrors(hipEventElapsedTime(&compute_time, start_compute, stop_compute)); 
bwd_computation_time.insert(bwd_computation_time.begin(), compute_time); if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == BATCHNORM) { if (!pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL)); checkCNMEM(cnmemFree(layer_input[i], NULL)); if (i > 0 && layer_type[i] != SOFTMAX) checkCNMEM(cnmemFree(dlayer_input[i], NULL)); } } void NeuralNet::getTransferTime(void *X, int *y, double learning_rate, std::vector<float> &fwd_transfer_time, std::vector<float> &bwd_transfer_time) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == SOFTMAX) continue; void *device_data; void *host_data; checkCNMEM(cnmemMalloc(&device_data, layer_input_size[i] * data_type_size, NULL)); checkCudaErrors(hipHostMalloc(&host_data, layer_input_size[i] * data_type_size)); checkCudaErrors(hipEventRecord(start_transfer, stream_memory)); checkCudaErrors(hipMemcpyAsync(host_data, device_data, layer_input_size[i] * data_type_size, hipMemcpyDeviceToHost, stream_memory)); checkCudaErrors(hipEventRecord(stop_transfer, stream_memory)); checkCudaErrors(hipEventSynchronize(stop_transfer)); float transfer_time; checkCudaErrors(hipEventElapsedTime(&transfer_time, start_transfer, stop_transfer)); fwd_transfer_time.push_back(transfer_time); checkCudaErrors(hipEventRecord(start_transfer, stream_memory)); checkCudaErrors(hipMemcpyAsync(device_data, host_data, layer_input_size[i] * data_type_size, hipMemcpyHostToDevice, stream_memory)); checkCudaErrors(hipEventRecord(stop_transfer, stream_memory)); checkCudaErrors(hipEventSynchronize(stop_transfer)); checkCudaErrors(hipEventElapsedTime(&transfer_time, start_transfer, stop_transfer)); bwd_transfer_time.push_back(transfer_time); } }
4e8f9e10842a4929f41459347cc8aaece3da45c0.cu
#include "neural_net.h" void NeuralNet::getComputationTime(void *X, int *y, double learning_rate, std::vector<float> &fwd_computation_time, std::vector<float> &bwd_computation_time) { for (int i = 0; i < num_layers; i++) prefetched[i] = false; // checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); // checkCudaErrors(cudaMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(this->y, y, batch_size * data_type_size, cudaMemcpyHostToDevice)); float alpha = 1.0, beta = 0.0; float Salpha = 1.0, Sbeta = 0.0; double Dalpha = 1.0, Dbeta = 0.0; // forward propagate for (int i = 0; i < num_layers; i++) { size_t cur_workspace_size; void *cur_workspace; checkCNMEM(cnmemMalloc(&layer_input[i], layer_input_size[i] * data_type_size, NULL)); checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } checkCudaErrors(cudaEventRecord(start_compute, stream_compute)); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; // computation checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->filter_desc, cur_params->W, cur_params->conv_desc, cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta, cur_params->output_tensor, layer_input[i + 1])); checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc, cur_params->b, &alpha, cur_params->output_tensor, layer_input[i + 1])); // if activation required if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } } else if (layer_type[i] == FULLY_CONNECTED) { // std::cout << "FC\n"; FCLayerParams *cur_params = (FCLayerParams *)params[i]; // std::cout << "FChere" << i << std::endl; if (data_type == CUDNN_DATA_FLOAT) { checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out, (float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1], cur_params->C_out)); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out, (double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1], cur_params->C_out)); } if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, 
layer_input[i + 1])); } } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->factor, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->output_tensor, layer_input[i + 1])); } else if (layer_type[i] == ACTV) { std::cout << "Panic!! ACTV wrong place\n"; exit(0); ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } else if (layer_type[i] == SOFTMAX) { // std::cout << "Softmax\n"; std::cout << "Panic!! SOFTMAX wrong place\n"; exit(0); SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } // ---------------------- vDNN start ---------------------- // synchronization // checkCudaErrors(cudaDeviceSynchronize()); // if next layer is ACTV or SOFTMAX, complete that and come to synchronization // the case in above if for ACTV and SOFTMAX never occurs if (layer_type[i + 1] == SOFTMAX) { i++; layer_input[i + 1] = layer_input[i]; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); i--; } // sync with stream_compute guaranteed checkCudaErrors(cudaEventRecord(stop_compute, stream_compute)); checkCudaErrors(cudaEventSynchronize(stop_compute)); float compute_time = 0; checkCudaErrors(cudaEventElapsedTime(&compute_time, start_compute, stop_compute)); fwd_computation_time.push_back(compute_time); if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); } checkCNMEM(cnmemFree(layer_input[i], NULL)); checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } // ---------------------- vDNN end ------------------------ } // time for loss compute ignored // *scalar_loss = computeLoss(); // time for softmax backward ignored // ---------------------- vDNN start ---------------------- // checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); // space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); // // std::cout << "Free bytes: " << free_bytes << std::endl; // // ---------------------- vDNN end 
------------------------ // if (layer_type[num_layers - 1] == SOFTMAX) { // // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1]; // if (data_type == CUDNN_DATA_FLOAT) { // checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float))); // softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (float *)layer_input[num_layers], // (float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); // } // else if (data_type == CUDNN_DATA_DOUBLE) { // checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double))); // softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (double *)layer_input[num_layers], // (double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); // } // } for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); checkCNMEM(cnmemMalloc(&layer_input[i], layer_input_size[i] * data_type_size, NULL)); checkCNMEM(cnmemMalloc(&dlayer_input[i + 1], layer_input_size[i] * data_type_size, NULL)); if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); } } // ---------------------- vDNN end ------------------------ if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; // allocate space for derivative if (!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); } } if (!(i + 1 < num_layers && layer_type[i + 1] == SOFTMAX)) checkCudaErrors(cudaEventRecord(start_compute, stream_compute)); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size : cur_data_workspace_size; checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1], &beta, cur_params->bias_desc, cur_params->db)); // std::cout << "neural_net: backward conv i:" << i << std::endl; checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta, cur_params->filter_desc, cur_params->dW)); if (i > 0) checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W, cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace, cur_workspace_size, &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "Free bytes: " << free_bytes << std::endl; // std::cout << "here\n"; cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } if (data_type == CUDNN_DATA_FLOAT) { // bias backward checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)one_vec, batch_size, &Sbeta, (float *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)dlayer_input[i + 1], cur_params->C_out, &Sbeta, (float *)dlayer_input[i], cur_params->C_in)); } else if (data_type == CUDNN_DATA_DOUBLE) { // bias backward checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)one_vec, batch_size, &Dbeta, (double *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)dlayer_input[i + 1], cur_params->C_out, &Dbeta, (double *)dlayer_input[i], cur_params->C_in)); } cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], 
cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->sbmv_desc, cur_params->scale, cur_params->dscale, cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); } else if (layer_type[i] == ACTV) { ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); continue; } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "compute here\n"; continue; } // ---------------------- vDNN start ---------------------- // checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaEventRecord(stop_compute, stream_compute)); checkCudaErrors(cudaEventSynchronize(stop_compute)); float compute_time; checkCudaErrors(cudaEventElapsedTime(&compute_time, start_compute, stop_compute)); bwd_computation_time.insert(bwd_computation_time.begin(), compute_time); if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } else if (layer_type[i] == BATCHNORM) { if (!pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); } } checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL)); checkCNMEM(cnmemFree(layer_input[i], NULL)); if (i > 0 && layer_type[i] != SOFTMAX) checkCNMEM(cnmemFree(dlayer_input[i], NULL)); } } void NeuralNet::getTransferTime(void *X, int *y, double learning_rate, std::vector<float> &fwd_transfer_time, std::vector<float> &bwd_transfer_time) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == SOFTMAX) continue; void *device_data; void *host_data; checkCNMEM(cnmemMalloc(&device_data, layer_input_size[i] * data_type_size, NULL)); checkCudaErrors(cudaMallocHost(&host_data, layer_input_size[i] * data_type_size)); checkCudaErrors(cudaEventRecord(start_transfer, stream_memory)); 
checkCudaErrors(cudaMemcpyAsync(host_data, device_data, layer_input_size[i] * data_type_size, cudaMemcpyDeviceToHost, stream_memory)); checkCudaErrors(cudaEventRecord(stop_transfer, stream_memory)); checkCudaErrors(cudaEventSynchronize(stop_transfer)); float transfer_time; checkCudaErrors(cudaEventElapsedTime(&transfer_time, start_transfer, stop_transfer)); fwd_transfer_time.push_back(transfer_time); checkCudaErrors(cudaEventRecord(start_transfer, stream_memory)); checkCudaErrors(cudaMemcpyAsync(device_data, host_data, layer_input_size[i] * data_type_size, cudaMemcpyHostToDevice, stream_memory)); checkCudaErrors(cudaEventRecord(stop_transfer, stream_memory)); checkCudaErrors(cudaEventSynchronize(stop_transfer)); checkCudaErrors(cudaEventElapsedTime(&transfer_time, start_transfer, stop_transfer)); bwd_transfer_time.push_back(transfer_time); /* release the per-layer scratch buffers used only for this timing pass; without this every layer leaks one device allocation and one pinned host allocation */ checkCNMEM(cnmemFree(device_data, NULL)); checkCudaErrors(cudaFreeHost(host_data)); } }
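// Illustrative sketch (not part of the original NeuralNet source): the per-layer timing vectors
// filled in above (e.g. bwd_computation_time, and the transfer times measured by getTransferTime)
// could be compared on the host to see which layers can hide their offload/prefetch traffic behind
// computation. The helper name and the simple "does the transfer fit under the compute" rule below
// are assumptions for illustration only.
#include <algorithm>
#include <cstdio>
#include <vector>

static void reportOverlapHeadroom(const std::vector<float> &computation_time_ms,
                                  const std::vector<float> &transfer_time_ms) {
    // Both vectors are indexed by layer; values are in milliseconds (cudaEventElapsedTime units).
    size_t n = std::min(computation_time_ms.size(), transfer_time_ms.size());
    for (size_t i = 0; i < n; i++) {
        bool hidden = transfer_time_ms[i] <= computation_time_ms[i];
        printf("layer %zu: compute %.3f ms, transfer %.3f ms -> %s\n", i,
               computation_time_ms[i], transfer_time_ms[i],
               hidden ? "transfer fits under compute" : "transfer would stall the stream");
    }
}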
c2eeeddde63abd925380416ccc4a4a2bee6d0da0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "abs_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int idx = 1;
            float *dy = NULL;
            hipMalloc(&dy, XSIZE*YSIZE*sizeof(float));      // allocate bytes, not elements
            int incy = 1;
            float *result = NULL;
            hipMalloc(&result, XSIZE*YSIZE*sizeof(float));  // allocate bytes, not elements
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( abs_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( abs_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( abs_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            hipFree(dy);       // release per-configuration buffers so the sweep does not leak
            hipFree(result);
        }
    }
}
c2eeeddde63abd925380416ccc4a4a2bee6d0da0.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "abs_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int idx = 1;
            float *dy = NULL;
            cudaMalloc(&dy, XSIZE*YSIZE*sizeof(float));      // allocate bytes, not elements
            int incy = 1;
            float *result = NULL;
            cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));  // allocate bytes, not elements
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            abs_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                abs_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                abs_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            cudaFree(dy);       // release per-configuration buffers so the sweep does not leak
            cudaFree(result);
        }
    }
}
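// The kernel itself is pulled in via #include "abs_float.cu" and is not shown in this pair. A
// minimal sketch consistent with the launch arguments (n, idx, dy, incy, result) and the 2D launch
// configuration used by the harness above might look like the following; the parameter meanings
// (BLAS-style 1-based offset idx and stride incy) are assumptions and may differ from the real
// abs_float.cu.
__global__ void abs_float(int n, int idx, float *dy, int incy, float *result) {
    // Flatten the 2D grid/block launch into one element index.
    int i = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
          + blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        result[i] = fabsf(dy[idx - 1 + i * incy]);  // elementwise absolute value
    }
}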
fcbf02cd982193bf8e2192c2fb966cf42bf15345.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BlockGetFlag.h"

void BlockGetFlag::init()
{
    HANDLE_ERROR(hipMalloc(&srcd, sizeof(uchar)*rows*cols*3));
    gmat_src = new cv::cuda::GpuMat(rows, cols, CV_8UC3, srcd);
    HANDLE_ERROR(hipMalloc(&grayd, sizeof(uchar)*rows*cols));
    gmat_gray = new cv::cuda::GpuMat(rows, cols, CV_8UC1, grayd);
    HANDLE_ERROR(hipMalloc(&blurd, sizeof(uchar)*rows*cols));
    gmat_blur = new cv::cuda::GpuMat(rows, cols, CV_8UC1, blurd);
    HANDLE_ERROR(hipMalloc(&fMapd, sizeof(uchar)*rows*cols));
    HANDLE_ERROR(hipHostMalloc(&fMaph, rows*cols*sizeof(uchar)));
    gauss = cv::cuda::createGaussianFilter(CV_8U, CV_8U, cv::Size(GFSize, GFSize), GFs1, GFs2);
    cv::cuda::cvtColor(*gmat_src, *gmat_gray, CV_RGB2GRAY);
    gauss->apply(*gmat_gray, *gmat_blur);
}

void BlockGetFlag::deinit()
{
    HANDLE_ERROR(hipFree(srcd));
    HANDLE_ERROR(hipFree(grayd));
    HANDLE_ERROR(hipFree(blurd));
    HANDLE_ERROR(hipFree(fMapd));
    HANDLE_ERROR(hipHostFree(fMaph));
}

void BlockGetFlag::enqueue(cv::Mat& sMaph, cv::cuda::Stream& cvstream)
{
    // GPU block partitioning
    const dim3 dimBlock(32,32);;
    // GPU grid partitioning
    const dim3 dimGrid((cols - 4 + 27) / 28, (rows - 4 + 27) / 28);
    hipStream_t custream = cv::cuda::StreamAccessor::getStream(cvstream);
    HANDLE_ERROR(hipMemcpyAsync(srcd, sMaph.data, sizeof(uchar)*rows*cols*3, hipMemcpyHostToDevice, custream));
    cv::cuda::cvtColor(*gmat_src, *gmat_gray, CV_RGB2GRAY, 0, cvstream);
    gauss->apply(*gmat_gray, *gmat_blur, cvstream);
    hipLaunchKernelGGL(( kernelC), dim3(dimGrid), dim3(dimBlock), 0,custream , blurd, fMapd, cols, rows, th, k);
    HANDLE_ERROR(hipMemcpyAsync(fMaph, fMapd, sizeof(uchar)*rows*cols, hipMemcpyDeviceToHost, custream));
}

__global__ void kernelC(uchar *blur, uchar *fMap, int gcols, int grows, int ANCHOR_TH, int K)
{
#define LIDX(x, y) [(x) + (y)*lcols]
#define GIDX(x, y) [(x) + (y)*gcols]
    const int &lx = threadIdx.x;
    const int &ly = threadIdx.y;
    const int &lcols = blockDim.x;
    const int &lrows = blockDim.y;
    int gx = blockIdx.x*(lcols - 4) + threadIdx.x;
    int gy = blockIdx.y*(lrows - 4) + threadIdx.y;
    int dx = 0;
    int dy = 0;
    float val = 0;
    uchar dir = 0;
    uchar flag1 = 0;
    uchar flag2 = 0;
    int &com = dx;
    uchar center = 0;
    uchar fmap = 0;
    uchar &a = flag1;
    uchar &b = flag2;
    uchar &c = center;
    // uchar r;
    __shared__ volatile uchar sblur[32*32];
    __shared__ volatile uchar sgMap[32*32];
    // up to here: 4.362 ms

    // load the tile into shared memory: 7.5 ms
    if(gx<gcols && gy<grows)
        sblur LIDX(lx, ly) = blur GIDX(gx, gy);
    __syncthreads();
    // up to here: 11.767 ms

    // gradient computation: 17.6 ms
    if(lx!=0 && ly!=0 && lx<(lcols-1) && ly<(lrows-1) && gx<(gcols-1) && gy<(grows-1))
    {
        dx = sblur LIDX(lx+1,ly-1);
        dx += 2 * sblur LIDX(lx+1,ly);
        dx += sblur LIDX(lx+1,ly+1);
        dx -= sblur LIDX(lx-1,ly-1);
        dx -= 2 * sblur LIDX(lx-1,ly);
        dx -= sblur LIDX(lx-1,ly+1);
        dx = abs(dx);
        dy = sblur LIDX(lx-1,ly-1);
        dy += 2 * sblur LIDX(lx,ly-1);
        dy += sblur LIDX(lx+1,ly-1);
        dy -= sblur LIDX(lx-1,ly+1);
        dy -= 2 * sblur LIDX(lx,ly+1);
        dy -= sblur LIDX(lx+1,ly+1);
        dy = abs(dy);
        val = 0.5f*dx + 0.5f*dy;
        if (val > 255) val = 255.0f;
        // 1 -- vertical 0 -- horizontal
        dir = dx > dy;
        fmap |= (dir<<7)&0x80;
        center = (uchar)(val);
        sgMap LIDX(lx,ly) = center;
        // gMap GIDX(gx,gy) = center;
        //debug
        // if(gx==732 && gy==1445)
        // {
        //     printf("%d\n", center);
        // }
    }
    __syncthreads();
    // up to here: 29.3 ms

    // anchor extraction: 21.341 ms
    if(lx>1 && ly>1 && lx<(lcols-2) && ly<(lrows-2) && gx<(gcols-2) && gy<(grows-2))
    {
        // h
        flag1 = !dir;
        com = center;
        com -= sgMap LIDX(lx, ly-1);
        flag1 &= com>=ANCHOR_TH;
        com = center;
        com -= sgMap LIDX(lx, ly+1);
        flag1 &= com>=ANCHOR_TH;
        // v
        flag2 = dir;
        com = center;
        com -= sgMap LIDX(lx-1,ly);
        flag2 &= com >= ANCHOR_TH;
        com = center;
        com -= sgMap LIDX(lx+1,ly);
        flag2 &= com >= ANCHOR_TH;
        fmap |= (((flag1 | flag2) && ((gx-1)%K)==0 && ((gy-1)%K)==0)<<6)&0x40;
        // fmap
        // 0 0 0 0 0 0 0 0
        // | |  upper-left  left  lower-left  upper-right  right  lower-right
        // -dir|  above-left  above  above-right  below-left  below  below-right
        //    -keypoint
        // dir : 1 -- vertical  0 -- horizontal
        // keypoint: anchor point
        // horizontal:
        //   direction A - left
        //   direction B - right
        // vertical:
        //   direction A - up
        //   direction B - down

        // a  b  c
        // above-left  above  above-right
        // upper-left  left   lower-left
        a = sgMap LIDX(lx - 1, ly - 1);
        if(dir) // vertical
        {
            b = sgMap LIDX(lx, ly - 1);
            c = sgMap LIDX(lx + 1, ly - 1);
        }
        else
        {
            b = sgMap LIDX(lx - 1, ly);
            c = sgMap LIDX(lx - 1, ly + 1);
        }
        fmap |= (a>b && a>c) << 5;
        fmap |= (b>a && b>c) << 4;
        fmap |= (c>a && c>b) << 3;

        // a  b  c
        // below-left   below  below-right
        // upper-right  right  lower-right
        if(dir) // vertical
        {
            a = sgMap LIDX(lx - 1, ly + 1);
            b = sgMap LIDX(lx, ly + 1);
        }
        else
        {
            a = sgMap LIDX(lx + 1, ly - 1);
            b = sgMap LIDX(lx + 1, ly);
        }
        c = sgMap LIDX(lx + 1, ly + 1);
        fmap |= (a>b && a>c) << 2;
        fmap |= (b>a && b>c) << 1;
        fmap |= (c>a && c>b) << 0;

        // // debug
        // if(gx==114 && gy==10)
        // {
        //     printf("%d\n", a);
        //     printf("%d\n", b);
        //     printf("%d\n", c);
        // }

        fMap GIDX(gx,gy) = fmap;
    }
    // up to here: 50.641 ms
}
fcbf02cd982193bf8e2192c2fb966cf42bf15345.cu
#include "BlockGetFlag.h"

void BlockGetFlag::init()
{
    HANDLE_ERROR(cudaMalloc(&srcd, sizeof(uchar)*rows*cols*3));
    gmat_src = new cv::cuda::GpuMat(rows, cols, CV_8UC3, srcd);
    HANDLE_ERROR(cudaMalloc(&grayd, sizeof(uchar)*rows*cols));
    gmat_gray = new cv::cuda::GpuMat(rows, cols, CV_8UC1, grayd);
    HANDLE_ERROR(cudaMalloc(&blurd, sizeof(uchar)*rows*cols));
    gmat_blur = new cv::cuda::GpuMat(rows, cols, CV_8UC1, blurd);
    HANDLE_ERROR(cudaMalloc(&fMapd, sizeof(uchar)*rows*cols));
    HANDLE_ERROR(cudaMallocHost(&fMaph, rows*cols*sizeof(uchar)));
    gauss = cv::cuda::createGaussianFilter(CV_8U, CV_8U, cv::Size(GFSize, GFSize), GFs1, GFs2);
    cv::cuda::cvtColor(*gmat_src, *gmat_gray, CV_RGB2GRAY);
    gauss->apply(*gmat_gray, *gmat_blur);
}

void BlockGetFlag::deinit()
{
    HANDLE_ERROR(cudaFree(srcd));
    HANDLE_ERROR(cudaFree(grayd));
    HANDLE_ERROR(cudaFree(blurd));
    HANDLE_ERROR(cudaFree(fMapd));
    HANDLE_ERROR(cudaFreeHost(fMaph));
}

void BlockGetFlag::enqueue(cv::Mat& sMaph, cv::cuda::Stream& cvstream)
{
    // GPU block partitioning
    const dim3 dimBlock(32,32);;
    // GPU grid partitioning
    const dim3 dimGrid((cols - 4 + 27) / 28, (rows - 4 + 27) / 28);
    cudaStream_t custream = cv::cuda::StreamAccessor::getStream(cvstream);
    HANDLE_ERROR(cudaMemcpyAsync(srcd, sMaph.data, sizeof(uchar)*rows*cols*3, cudaMemcpyHostToDevice, custream));
    cv::cuda::cvtColor(*gmat_src, *gmat_gray, CV_RGB2GRAY, 0, cvstream);
    gauss->apply(*gmat_gray, *gmat_blur, cvstream);
    kernelC<<< dimGrid, dimBlock, 0,custream >>>(blurd, fMapd, cols, rows, th, k);
    HANDLE_ERROR(cudaMemcpyAsync(fMaph, fMapd, sizeof(uchar)*rows*cols, cudaMemcpyDeviceToHost, custream));
}

__global__ void kernelC(uchar *blur, uchar *fMap, int gcols, int grows, int ANCHOR_TH, int K)
{
#define LIDX(x, y) [(x) + (y)*lcols]
#define GIDX(x, y) [(x) + (y)*gcols]
    const int &lx = threadIdx.x;
    const int &ly = threadIdx.y;
    const int &lcols = blockDim.x;
    const int &lrows = blockDim.y;
    int gx = blockIdx.x*(lcols - 4) + threadIdx.x;
    int gy = blockIdx.y*(lrows - 4) + threadIdx.y;
    int dx = 0;
    int dy = 0;
    float val = 0;
    uchar dir = 0;
    uchar flag1 = 0;
    uchar flag2 = 0;
    int &com = dx;
    uchar center = 0;
    uchar fmap = 0;
    uchar &a = flag1;
    uchar &b = flag2;
    uchar &c = center;
    // uchar r;
    __shared__ volatile uchar sblur[32*32];
    __shared__ volatile uchar sgMap[32*32];
    // up to here: 4.362 ms

    // load the tile into shared memory: 7.5 ms
    if(gx<gcols && gy<grows)
        sblur LIDX(lx, ly) = blur GIDX(gx, gy);
    __syncthreads();
    // up to here: 11.767 ms

    // gradient computation: 17.6 ms
    if(lx!=0 && ly!=0 && lx<(lcols-1) && ly<(lrows-1) && gx<(gcols-1) && gy<(grows-1))
    {
        dx = sblur LIDX(lx+1,ly-1);
        dx += 2 * sblur LIDX(lx+1,ly);
        dx += sblur LIDX(lx+1,ly+1);
        dx -= sblur LIDX(lx-1,ly-1);
        dx -= 2 * sblur LIDX(lx-1,ly);
        dx -= sblur LIDX(lx-1,ly+1);
        dx = abs(dx);
        dy = sblur LIDX(lx-1,ly-1);
        dy += 2 * sblur LIDX(lx,ly-1);
        dy += sblur LIDX(lx+1,ly-1);
        dy -= sblur LIDX(lx-1,ly+1);
        dy -= 2 * sblur LIDX(lx,ly+1);
        dy -= sblur LIDX(lx+1,ly+1);
        dy = abs(dy);
        val = 0.5f*dx + 0.5f*dy;
        if (val > 255) val = 255.0f;
        // 1 -- vertical 0 -- horizontal
        dir = dx > dy;
        fmap |= (dir<<7)&0x80;
        center = (uchar)(val);
        sgMap LIDX(lx,ly) = center;
        // gMap GIDX(gx,gy) = center;
        //debug
        // if(gx==732 && gy==1445)
        // {
        //     printf("%d\n", center);
        // }
    }
    __syncthreads();
    // up to here: 29.3 ms

    // anchor extraction: 21.341 ms
    if(lx>1 && ly>1 && lx<(lcols-2) && ly<(lrows-2) && gx<(gcols-2) && gy<(grows-2))
    {
        // h
        flag1 = !dir;
        com = center;
        com -= sgMap LIDX(lx, ly-1);
        flag1 &= com>=ANCHOR_TH;
        com = center;
        com -= sgMap LIDX(lx, ly+1);
        flag1 &= com>=ANCHOR_TH;
        // v
        flag2 = dir;
        com = center;
        com -= sgMap LIDX(lx-1,ly);
        flag2 &= com >= ANCHOR_TH;
        com = center;
        com -= sgMap LIDX(lx+1,ly);
        flag2 &= com >= ANCHOR_TH;
        fmap |= (((flag1 | flag2) && ((gx-1)%K)==0 && ((gy-1)%K)==0)<<6)&0x40;
        // fmap
        // 0 0 0 0 0 0 0 0
        // | |  upper-left  left  lower-left  upper-right  right  lower-right
        // -dir|  above-left  above  above-right  below-left  below  below-right
        //    -keypoint
        // dir : 1 -- vertical  0 -- horizontal
        // keypoint: anchor point
        // horizontal:
        //   direction A - left
        //   direction B - right
        // vertical:
        //   direction A - up
        //   direction B - down

        // a  b  c
        // above-left  above  above-right
        // upper-left  left   lower-left
        a = sgMap LIDX(lx - 1, ly - 1);
        if(dir) // vertical
        {
            b = sgMap LIDX(lx, ly - 1);
            c = sgMap LIDX(lx + 1, ly - 1);
        }
        else
        {
            b = sgMap LIDX(lx - 1, ly);
            c = sgMap LIDX(lx - 1, ly + 1);
        }
        fmap |= (a>b && a>c) << 5;
        fmap |= (b>a && b>c) << 4;
        fmap |= (c>a && c>b) << 3;

        // a  b  c
        // below-left   below  below-right
        // upper-right  right  lower-right
        if(dir) // vertical
        {
            a = sgMap LIDX(lx - 1, ly + 1);
            b = sgMap LIDX(lx, ly + 1);
        }
        else
        {
            a = sgMap LIDX(lx + 1, ly - 1);
            b = sgMap LIDX(lx + 1, ly);
        }
        c = sgMap LIDX(lx + 1, ly + 1);
        fmap |= (a>b && a>c) << 2;
        fmap |= (b>a && b>c) << 1;
        fmap |= (c>a && c>b) << 0;

        // // debug
        // if(gx==114 && gy==10)
        // {
        //     printf("%d\n", a);
        //     printf("%d\n", b);
        //     printf("%d\n", c);
        // }

        fMap GIDX(gx,gy) = fmap;
    }
    // up to here: 50.641 ms
}
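// The fmap bit layout documented inside kernelC (bit 7 = gradient direction, bit 6 = anchor/keypoint
// candidate, bits 5..3 = which neighbour is the maximum on the A side, bits 2..0 = the same for the
// B side) can be unpacked on the host like this. This decoder is an illustrative sketch added here
// for clarity, not part of BlockGetFlag.cu.
struct FlagBits {
    bool vertical;   // bit 7: 1 -- vertical gradient, 0 -- horizontal
    bool anchor;     // bit 6: anchor (keypoint) candidate
    int  a_max;      // 0, 1 or 2: which of the three A-side neighbours is strictly largest (-1 if none)
    int  b_max;      // same for the B-side neighbours
};

static FlagBits decodeFlag(unsigned char fmap) {
    FlagBits f;
    f.vertical = (fmap >> 7) & 1;
    f.anchor   = (fmap >> 6) & 1;
    f.a_max = (fmap & 0x20) ? 0 : (fmap & 0x10) ? 1 : (fmap & 0x08) ? 2 : -1;
    f.b_max = (fmap & 0x04) ? 0 : (fmap & 0x02) ? 1 : (fmap & 0x01) ? 2 : -1;
    return f;
}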
356967f70d7af3a8d44227d5a9bf99de694db057.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_ShuffPos; glm::vec3 *dev_ShuffVel; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); hipMalloc((void**)&dev_ShuffPos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_ShuffPos failed!"); hipMalloc((void**)&dev_ShuffVel, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_ShuffVel failed!"); hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 boidPos = pos[iSelf]; glm::vec3 perceived_center = glm::vec3(0.0f,0.0f,0.0f); glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 perceived_vel = glm::vec3(0.0f, 0.0f, 0.0f); int neighbors1 = 0; int neighbors3 = 0; for (int i = 0; i < N; i++) { if (i != iSelf) { glm::vec3 bPos = pos[i]; float dist = glm::distance(bPos, boidPos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist < rule1Distance) { neighbors1++; perceived_center = perceived_center + bPos; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { c = c - (bPos - boidPos); } // Rule 3: boids try to match the speed of surrounding boid if (dist < rule3Distance) { perceived_vel = perceived_vel + vel[i]; neighbors3++; } } } glm::vec3 r1dv = glm::vec3(0.0f,0.0f,0.0f); if (neighbors1 > 0) { perceived_center /= (float)neighbors1; r1dv = (perceived_center - boidPos) * rule1Scale; } glm::vec3 r2dv = c * rule2Scale; glm::vec3 r3dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors3 > 0) { perceived_vel /= (float)neighbors3; r3dv = perceived_vel * rule3Scale; } return r1dv + r2dv + r3dv + vel[iSelf]; } __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 new_vel = computeVelocityChange(N, index, pos, vel1); // Compute a new velocity based on pos and vel1 // Clamp the speed if (glm::length(new_vel) > maxSpeed) { new_vel = glm::normalize(new_vel) * maxSpeed; } vel2[index] = new_vel; // Record the new velocity into vel2. Question: why NOT vel1? } __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? 
-scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 gs_pos = glm::floor((pos[index] - gridMin) * inverseCellWidth); /*printf("%f", inverseCellWidth); printf("%i %f %f %f\n", index, pos[index].x, pos[index].y, pos[index].z); printf("%i %f %f %f\n", index, gs_pos.x, gs_pos.y, gs_pos.z);*/ int gridIndex = gridIndex3Dto1D(gs_pos.x, gs_pos.y, gs_pos.z, gridResolution); indices[index] = index; gridIndices[index] = gridIndex; } __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } int gridIdx = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[gridIdx] = 0; } if (index == N - 1) { gridCellEndIndices[gridIdx] = N - 1; } if (index != 0 && gridIdx != particleGridIndices[index - 1]) { gridCellStartIndices[gridIdx] = index; //if (gridIdx == 8341) { // printf("%i start %i\n", gridIdx, index); //} } if (index != N-1 && gridIdx != particleGridIndices[index + 1]) { gridCellEndIndices[gridIdx] = index; //if (gridIdx == 8341) { // printf("%i end %i\n", gridIdx, index); //} //printf("%i end %i\n", gridIdx, index); } /*printf("%i %i\n", gridIdx, index); printf("%i start %i\n", gridIdx, gridCellStartIndices[gridIdx]); printf("%i end %i\n", gridIdx, gridCellEndIndices[gridIdx]);*/ // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. 
int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 ws_pos = pos[index]; glm::vec3 ws_shift_pos = ws_pos - gridMin; glm::vec3 gs_posf =(ws_shift_pos * inverseCellWidth); glm::vec3 gs_pos; gs_pos.x = (int)gs_posf.x; gs_pos.y = (int)gs_posf.y; gs_pos.z = (int)gs_posf.z; glm::vec3 quadf = (ws_shift_pos - (gs_pos * cellWidth)) - (cellWidth/2.0f); glm::vec3 quad(0, 0, 0); quad.x = (int)(quadf.x / fabsf(quadf.x)); quad.y = (int)(quadf.y / fabsf(quadf.y)); quad.z = (int)(quadf.z / fabsf(quadf.z)); glm::vec3 boidPos = ws_pos; int neighbors1 = 0; int neighbors3 = 0; glm::vec3 c(0.0f, 0.0f, 0.0f); glm::vec3 perceived_center(0.0f, 0.0f, 0.0f); glm::vec3 perceived_vel(0.0f, 0.0f, 0.0f); int gridRes = gridResolution; for (int i = 0; i < 2; i++) { int xn = gs_pos.x + (i*quad.x); if (xn >= gridRes || xn < 0) { continue; } for (int j = 0; j < 2; j++) { int yn = gs_pos.y + (j*quad.y); if (yn >= gridRes || yn < 0) { continue; } for (int k = 0; k < 2; k++) { int zn = gs_pos.z + (k*quad.z); if (zn >= gridRes || zn < 0) { continue; } glm::vec3 check_grid_cell(xn, yn, zn); int grid_idx = gridIndex3Dto1D(check_grid_cell.x, check_grid_cell.y, check_grid_cell.z, gridRes); int start_idx = gridCellStartIndices[grid_idx]; int end_idx = gridCellEndIndices[grid_idx]; if (start_idx == -1) { continue; } for (int idx = start_idx; idx < end_idx + 1; idx++) { int boid_id = particleArrayIndices[idx]; glm::vec3 bPos = pos[boid_id]; float dist = glm::distance(bPos, boidPos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist > 0) { if (dist < rule1Distance) { if (index == 64) { //printf("neighbor1 %f %f %f \n", bPos.x, bPos.y, bPos.z); } neighbors1++; perceived_center = perceived_center + bPos; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { c = c - (bPos - boidPos); } // Rule 3: boids try to match the speed of surrounding boid if (dist < rule3Distance) { perceived_vel = perceived_vel + vel1[boid_id]; neighbors3++; } } } } } } glm::vec3 r1dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors1 > 0) { perceived_center /= (float)neighbors1; r1dv = (perceived_center - boidPos) * rule1Scale; } glm::vec3 r2dv = c * rule2Scale; glm::vec3 r3dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors3 > 0) { perceived_vel /= (float)neighbors3; r3dv = perceived_vel * rule3Scale; } glm::vec3 new_vel = r1dv + r2dv + r3dv + vel1[index]; if (glm::length(new_vel) > maxSpeed) { new_vel = glm::normalize(new_vel) * maxSpeed; } vel2[index] = new_vel; // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. 
// - Clamp the speed change before putting the new speed in vel2 } __global__ void kernShufflePositionArray( int N, int *particleArrayIndices, glm::vec3 *shuffPos, glm::vec3 *shuffVel, glm::vec3 *pos, glm::vec3* vel) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } int old_index = particleArrayIndices[index]; shuffPos[index] = pos[old_index]; shuffVel[index] = vel[old_index]; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 ws_pos = pos[index]; glm::vec3 ws_shift_pos = ws_pos - gridMin; glm::vec3 gs_posf = (ws_shift_pos * inverseCellWidth); glm::vec3 gs_pos; gs_pos.x = (int)gs_posf.x; gs_pos.y = (int)gs_posf.y; gs_pos.z = (int)gs_posf.z; glm::vec3 quadf = (ws_shift_pos - (gs_pos * cellWidth)) - (cellWidth / 2.0f); glm::vec3 quad(0, 0, 0); quad.x = (int)(quadf.x / fabsf(quadf.x)); quad.y = (int)(quadf.y / fabsf(quadf.y)); quad.z = (int)(quadf.z / fabsf(quadf.z)); glm::vec3 boidPos = ws_pos; int neighbors1 = 0; int neighbors3 = 0; glm::vec3 c(0.0f, 0.0f, 0.0f); glm::vec3 perceived_center(0.0f, 0.0f, 0.0f); glm::vec3 perceived_vel(0.0f, 0.0f, 0.0f); int gridRes = gridResolution; for (int i = 0; i < 2; i++) { int xn = gs_pos.x + (i*quad.x); if (xn >= gridRes || xn < 0) { continue; } for (int j = 0; j < 2; j++) { int yn = gs_pos.y + (j*quad.y); if (yn >= gridRes || yn < 0) { continue; } for (int k = 0; k < 2; k++) { int zn = gs_pos.z + (k*quad.z); if (zn >= gridRes || zn < 0) { continue; } glm::vec3 check_grid_cell(xn, yn, zn); int grid_idx = gridIndex3Dto1D(check_grid_cell.x, check_grid_cell.y, check_grid_cell.z, gridRes); int start_idx = gridCellStartIndices[grid_idx]; int end_idx = gridCellEndIndices[grid_idx]; if (start_idx == -1) { continue; } for (int idx = start_idx; idx < end_idx + 1; idx++) { glm::vec3 bPos = pos[idx]; float dist = glm::distance(bPos, boidPos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist > 0) { if (dist < rule1Distance) { neighbors1++; perceived_center = perceived_center + bPos; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { c = c - (bPos - boidPos); } // Rule 3: boids try to match the speed of surrounding boid if (dist < rule3Distance) { perceived_vel = perceived_vel + vel1[idx]; neighbors3++; } } } } } } glm::vec3 r1dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors1 > 0) { perceived_center /= (float)neighbors1; r1dv = (perceived_center - boidPos) * rule1Scale; } glm::vec3 r2dv = c * rule2Scale; glm::vec3 r3dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors3 > 0) { perceived_vel /= (float)neighbors3; r3dv = perceived_vel * rule3Scale; } glm::vec3 new_vel = r1dv + r2dv + r3dv + vel1[index]; if (glm::length(new_vel) > maxSpeed) { new_vel = glm::normalize(new_vel) * maxSpeed; } vel2[index] = new_vel; } /** * Step the entire N-body simulation by `dt` seconds. 
*/ void Boids::stepSimulationNaive(float dt) { int N = numObjects; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(N, dev_pos, dev_vel1, dev_vel2); hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, N, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("stepSimulationNaive failed!"); hipDeviceSynchronize(); glm::vec3* temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationScatteredGrid(float dt) { int N = numObjects; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleArrayIndices, 0); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleGridIndices, 0); hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); // Wrap device vectors in thrust iterators for use with thrust. thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices int grid_size = gridSideCount * gridSideCount *gridSideCount; dim3 fullBlocksPerGrid_GRID((grid_size + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellStartIndices, -1); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellEndIndices, -1); hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N,dev_particleGridIndices,dev_gridCellStartIndices,dev_gridCellEndIndices); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (N, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); // - Update positions kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("stepSimulationNaive failed!"); hipDeviceSynchronize(); glm::vec3* temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; // - Ping-pong buffers as needed } void Boids::stepSimulationCoherentGrid(float dt) { int N = numObjects; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleArrayIndices, 0); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleGridIndices, 0); kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices int grid_size = gridSideCount * gridSideCount *gridSideCount; dim3 fullBlocksPerGrid_GRID((grid_size + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellStartIndices, -1); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellEndIndices, -1); kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(N, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - Perform velocity updates using neighbor search kernShufflePositionArray << <fullBlocksPerGrid, blockSize >> > (N, dev_particleArrayIndices, dev_ShuffPos, dev_ShuffVel, dev_pos, dev_vel1); hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, N, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_ShuffPos, dev_ShuffVel, dev_vel2); // - Update positions kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_ShuffPos, dev_vel2); checkCUDAErrorWithLine("stepSimulationCoherent failed!"); hipDeviceSynchronize(); glm::vec3* temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; glm::vec3* temp_pos = dev_pos; dev_pos = dev_ShuffPos; dev_ShuffPos = temp_pos; } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. hipFree(dev_particleGridIndices); hipFree(dev_particleArrayIndices); hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); hipFree(dev_ShuffPos); hipFree(dev_ShuffVel); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
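// Illustrative sketch (not part of the original file): kernUpdateVelNeighborSearchScattered and
// kernUpdateVelNeighborSearchCoherent above only visit the 2x2x2 block of grid cells on the side of
// the boid's own cell that the boid sits closest to. Because the cell width is twice the largest
// rule distance, any neighbour within range must fall in one of those eight cells. The same octant
// selection, written for a single axis on the host (the function name is hypothetical):
static void neighbourOctant1D(float p, float gridMin, float cellWidth, int &cell, int &step) {
    float shifted = p - gridMin;                                    // position relative to the grid origin
    cell = (int)(shifted / cellWidth);                              // cell index the boid falls in
    float offset = shifted - cell * cellWidth - 0.5f * cellWidth;   // signed distance from the cell centre
    step = (offset < 0.0f) ? -1 : 1;                                // which neighbouring cell to also scan
    // In 3D the kernels scan cell and cell+step independently per axis: 2*2*2 = 8 cells in total.
}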
356967f70d7af3a8d44227d5a9bf99de694db057.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 3.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_ShuffPos; glm::vec3 *dev_ShuffVel; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); cudaMalloc((void**)&dev_ShuffPos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_ShuffPos failed!"); cudaMalloc((void**)&dev_ShuffVel, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_ShuffVel failed!"); cudaThreadSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { glm::vec3 boidPos = pos[iSelf]; glm::vec3 perceived_center = glm::vec3(0.0f,0.0f,0.0f); glm::vec3 c = glm::vec3(0.0f, 0.0f, 0.0f); glm::vec3 perceived_vel = glm::vec3(0.0f, 0.0f, 0.0f); int neighbors1 = 0; int neighbors3 = 0; for (int i = 0; i < N; i++) { if (i != iSelf) { glm::vec3 bPos = pos[i]; float dist = glm::distance(bPos, boidPos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist < rule1Distance) { neighbors1++; perceived_center = perceived_center + bPos; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { c = c - (bPos - boidPos); } // Rule 3: boids try to match the speed of surrounding boid if (dist < rule3Distance) { perceived_vel = perceived_vel + vel[i]; neighbors3++; } } } glm::vec3 r1dv = glm::vec3(0.0f,0.0f,0.0f); if (neighbors1 > 0) { perceived_center /= (float)neighbors1; r1dv = (perceived_center - boidPos) * rule1Scale; } glm::vec3 r2dv = c * rule2Scale; glm::vec3 r3dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors3 > 0) { perceived_vel /= (float)neighbors3; r3dv = perceived_vel * rule3Scale; } return r1dv + r2dv + r3dv + vel[iSelf]; } __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 new_vel = computeVelocityChange(N, index, pos, vel1); // Compute a new velocity based on pos and vel1 // Clamp the speed if (glm::length(new_vel) > maxSpeed) { new_vel = glm::normalize(new_vel) * maxSpeed; } vel2[index] = new_vel; // Record the new velocity into vel2. Question: why NOT vel1? } __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? 
-scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 gs_pos = glm::floor((pos[index] - gridMin) * inverseCellWidth); /*printf("%f", inverseCellWidth); printf("%i %f %f %f\n", index, pos[index].x, pos[index].y, pos[index].z); printf("%i %f %f %f\n", index, gs_pos.x, gs_pos.y, gs_pos.z);*/ int gridIndex = gridIndex3Dto1D(gs_pos.x, gs_pos.y, gs_pos.z, gridResolution); indices[index] = index; gridIndices[index] = gridIndex; } __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } int gridIdx = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[gridIdx] = 0; } if (index == N - 1) { gridCellEndIndices[gridIdx] = N - 1; } if (index != 0 && gridIdx != particleGridIndices[index - 1]) { gridCellStartIndices[gridIdx] = index; //if (gridIdx == 8341) { // printf("%i start %i\n", gridIdx, index); //} } if (index != N-1 && gridIdx != particleGridIndices[index + 1]) { gridCellEndIndices[gridIdx] = index; //if (gridIdx == 8341) { // printf("%i end %i\n", gridIdx, index); //} //printf("%i end %i\n", gridIdx, index); } /*printf("%i %i\n", gridIdx, index); printf("%i start %i\n", gridIdx, gridCellStartIndices[gridIdx]); printf("%i end %i\n", gridIdx, gridCellEndIndices[gridIdx]);*/ // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. 
int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 ws_pos = pos[index]; glm::vec3 ws_shift_pos = ws_pos - gridMin; glm::vec3 gs_posf =(ws_shift_pos * inverseCellWidth); glm::vec3 gs_pos; gs_pos.x = (int)gs_posf.x; gs_pos.y = (int)gs_posf.y; gs_pos.z = (int)gs_posf.z; glm::vec3 quadf = (ws_shift_pos - (gs_pos * cellWidth)) - (cellWidth/2.0f); glm::vec3 quad(0, 0, 0); quad.x = (int)(quadf.x / fabsf(quadf.x)); quad.y = (int)(quadf.y / fabsf(quadf.y)); quad.z = (int)(quadf.z / fabsf(quadf.z)); glm::vec3 boidPos = ws_pos; int neighbors1 = 0; int neighbors3 = 0; glm::vec3 c(0.0f, 0.0f, 0.0f); glm::vec3 perceived_center(0.0f, 0.0f, 0.0f); glm::vec3 perceived_vel(0.0f, 0.0f, 0.0f); int gridRes = gridResolution; for (int i = 0; i < 2; i++) { int xn = gs_pos.x + (i*quad.x); if (xn >= gridRes || xn < 0) { continue; } for (int j = 0; j < 2; j++) { int yn = gs_pos.y + (j*quad.y); if (yn >= gridRes || yn < 0) { continue; } for (int k = 0; k < 2; k++) { int zn = gs_pos.z + (k*quad.z); if (zn >= gridRes || zn < 0) { continue; } glm::vec3 check_grid_cell(xn, yn, zn); int grid_idx = gridIndex3Dto1D(check_grid_cell.x, check_grid_cell.y, check_grid_cell.z, gridRes); int start_idx = gridCellStartIndices[grid_idx]; int end_idx = gridCellEndIndices[grid_idx]; if (start_idx == -1) { continue; } for (int idx = start_idx; idx < end_idx + 1; idx++) { int boid_id = particleArrayIndices[idx]; glm::vec3 bPos = pos[boid_id]; float dist = glm::distance(bPos, boidPos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist > 0) { if (dist < rule1Distance) { if (index == 64) { //printf("neighbor1 %f %f %f \n", bPos.x, bPos.y, bPos.z); } neighbors1++; perceived_center = perceived_center + bPos; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { c = c - (bPos - boidPos); } // Rule 3: boids try to match the speed of surrounding boid if (dist < rule3Distance) { perceived_vel = perceived_vel + vel1[boid_id]; neighbors3++; } } } } } } glm::vec3 r1dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors1 > 0) { perceived_center /= (float)neighbors1; r1dv = (perceived_center - boidPos) * rule1Scale; } glm::vec3 r2dv = c * rule2Scale; glm::vec3 r3dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors3 > 0) { perceived_vel /= (float)neighbors3; r3dv = perceived_vel * rule3Scale; } glm::vec3 new_vel = r1dv + r2dv + r3dv + vel1[index]; if (glm::length(new_vel) > maxSpeed) { new_vel = glm::normalize(new_vel) * maxSpeed; } vel2[index] = new_vel; // - Identify the grid cell that this particle is in // - Identify which cells may contain neighbors. This isn't always 8. // - For each cell, read the start/end indices in the boid pointer array. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. 
// - Clamp the speed change before putting the new speed in vel2 } __global__ void kernShufflePositionArray( int N, int *particleArrayIndices, glm::vec3 *shuffPos, glm::vec3 *shuffVel, glm::vec3 *pos, glm::vec3* vel) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } int old_index = particleArrayIndices[index]; shuffPos[index] = pos[old_index]; shuffVel[index] = vel[old_index]; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 ws_pos = pos[index]; glm::vec3 ws_shift_pos = ws_pos - gridMin; glm::vec3 gs_posf = (ws_shift_pos * inverseCellWidth); glm::vec3 gs_pos; gs_pos.x = (int)gs_posf.x; gs_pos.y = (int)gs_posf.y; gs_pos.z = (int)gs_posf.z; glm::vec3 quadf = (ws_shift_pos - (gs_pos * cellWidth)) - (cellWidth / 2.0f); glm::vec3 quad(0, 0, 0); quad.x = (int)(quadf.x / fabsf(quadf.x)); quad.y = (int)(quadf.y / fabsf(quadf.y)); quad.z = (int)(quadf.z / fabsf(quadf.z)); glm::vec3 boidPos = ws_pos; int neighbors1 = 0; int neighbors3 = 0; glm::vec3 c(0.0f, 0.0f, 0.0f); glm::vec3 perceived_center(0.0f, 0.0f, 0.0f); glm::vec3 perceived_vel(0.0f, 0.0f, 0.0f); int gridRes = gridResolution; for (int i = 0; i < 2; i++) { int xn = gs_pos.x + (i*quad.x); if (xn >= gridRes || xn < 0) { continue; } for (int j = 0; j < 2; j++) { int yn = gs_pos.y + (j*quad.y); if (yn >= gridRes || yn < 0) { continue; } for (int k = 0; k < 2; k++) { int zn = gs_pos.z + (k*quad.z); if (zn >= gridRes || zn < 0) { continue; } glm::vec3 check_grid_cell(xn, yn, zn); int grid_idx = gridIndex3Dto1D(check_grid_cell.x, check_grid_cell.y, check_grid_cell.z, gridRes); int start_idx = gridCellStartIndices[grid_idx]; int end_idx = gridCellEndIndices[grid_idx]; if (start_idx == -1) { continue; } for (int idx = start_idx; idx < end_idx + 1; idx++) { glm::vec3 bPos = pos[idx]; float dist = glm::distance(bPos, boidPos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist > 0) { if (dist < rule1Distance) { neighbors1++; perceived_center = perceived_center + bPos; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { c = c - (bPos - boidPos); } // Rule 3: boids try to match the speed of surrounding boid if (dist < rule3Distance) { perceived_vel = perceived_vel + vel1[idx]; neighbors3++; } } } } } } glm::vec3 r1dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors1 > 0) { perceived_center /= (float)neighbors1; r1dv = (perceived_center - boidPos) * rule1Scale; } glm::vec3 r2dv = c * rule2Scale; glm::vec3 r3dv = glm::vec3(0.0f, 0.0f, 0.0f); if (neighbors3 > 0) { perceived_vel /= (float)neighbors3; r3dv = perceived_vel * rule3Scale; } glm::vec3 new_vel = r1dv + r2dv + r3dv + vel1[index]; if (glm::length(new_vel) > maxSpeed) { new_vel = glm::normalize(new_vel) * maxSpeed; } vel2[index] = new_vel; } /** * Step the entire N-body simulation by `dt` seconds. 
*/ void Boids::stepSimulationNaive(float dt) { int N = numObjects; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(N, dev_pos, dev_vel1, dev_vel2); kernUpdatePos <<<fullBlocksPerGrid, blockSize >>>(N, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("stepSimulationNaive failed!"); cudaThreadSynchronize(); glm::vec3* temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationScatteredGrid(float dt) { int N = numObjects; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleArrayIndices, 0); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleGridIndices, 0); kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); // Wrap device vectors in thrust iterators for use with thrust. thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices int grid_size = gridSideCount * gridSideCount *gridSideCount; dim3 fullBlocksPerGrid_GRID((grid_size + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellStartIndices, -1); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellEndIndices, -1); kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(N,dev_particleGridIndices,dev_gridCellStartIndices,dev_gridCellEndIndices); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (N, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); // - Update positions kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_pos, dev_vel2); checkCUDAErrorWithLine("stepSimulationNaive failed!"); cudaThreadSynchronize(); glm::vec3* temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; // - Ping-pong buffers as needed } void Boids::stepSimulationCoherentGrid(float dt) { int N = numObjects; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleArrayIndices, 0); kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (N, dev_particleGridIndices, 0); kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices); dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices int grid_size = gridSideCount * gridSideCount *gridSideCount; dim3 fullBlocksPerGrid_GRID((grid_size + blockSize - 1) / blockSize); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellStartIndices, -1); kernResetIntBuffer << <fullBlocksPerGrid_GRID, blockSize >> > (grid_size, dev_gridCellEndIndices, -1); kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(N, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // - Perform velocity updates using neighbor search kernShufflePositionArray << <fullBlocksPerGrid, blockSize >> > (N, dev_particleArrayIndices, dev_ShuffPos, dev_ShuffVel, dev_pos, dev_vel1); kernUpdateVelNeighborSearchCoherent<<<fullBlocksPerGrid, blockSize >>> (N, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_ShuffPos, dev_ShuffVel, dev_vel2); // - Update positions kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_ShuffPos, dev_vel2); checkCUDAErrorWithLine("stepSimulationCoherent failed!"); cudaThreadSynchronize(); glm::vec3* temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; glm::vec3* temp_pos = dev_pos; dev_pos = dev_ShuffPos; dev_ShuffPos = temp_pos; } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. cudaFree(dev_particleGridIndices); cudaFree(dev_particleArrayIndices); cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); cudaFree(dev_ShuffPos); cudaFree(dev_ShuffVel); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);

  // How to copy data back to the CPU side from the GPU
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");

  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }

  // cleanup
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
  return;
}
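The scattered and coherent neighbor-search kernels above read gridCellStartIndices / gridCellEndIndices as inclusive [start, end] ranges and treat -1 as an empty cell, and the step functions build those tables from the thrust-sorted particleGridIndices array via kernIdentifyCellStartEnd, which is presumably defined earlier in the file. The kernel below is an illustrative sketch of how such a start/end table is typically filled in, not the file's own implementation; its name and signature are assumptions made only for this sketch.

// Hypothetical sketch (not the file's kernIdentifyCellStartEnd): one thread per boid
// compares its cell index in the sorted particleGridIndices array with its neighbors'
// to mark cell boundaries. End indices are stored inclusively, matching the
// `idx < end_idx + 1` loops in the neighbor-search kernels, and cells that are never
// written keep the -1 sentinel set by kernResetIntBuffer.
__global__ void kernIdentifyCellStartEndSketch(int N, const int *particleGridIndices,
                                               int *gridCellStartIndices,
                                               int *gridCellEndIndices) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  int cell = particleGridIndices[index];
  if (index == 0 || cell != particleGridIndices[index - 1]) {
    gridCellStartIndices[cell] = index;   // first sorted boid that falls in this cell
  }
  if (index == N - 1 || cell != particleGridIndices[index + 1]) {
    gridCellEndIndices[cell] = index;     // last sorted boid in this cell (inclusive)
  }
}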
d48d6a06ed35cfb809f9ab04d38701572b0648aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019, Xuhao Chen #include "motif.h" #include "timer.h" #include "cutils.h" #define USE_PID #define USE_SIMPLE #define VERTEX_INDUCED #include "miner.cuh" #include <hipcub/hipcub.hpp> #include <thrust/scan.h> #include <thrust/execution_policy.h> typedef hipcub::BlockReduce<AccType, BLOCK_SIZE> BlockReduce; void printout_motifs(int npatterns, AccType *accumulators) { std::cout << std::endl; if (npatterns == 2) { std::cout << "\ttriangles\t" << accumulators[0] << std::endl; std::cout << "\t3-chains\t" << accumulators[1] << std::endl; } else if (npatterns == 6) { std::cout << "\t4-paths --> " << accumulators[0] << std::endl; std::cout << "\t3-stars --> " << accumulators[1] << std::endl; std::cout << "\t4-cycles --> " << accumulators[2] << std::endl; std::cout << "\ttailed-triangles --> " << accumulators[3] << std::endl; std::cout << "\tdiamonds --> " << accumulators[4] << std::endl; std::cout << "\t4-cliques --> " << accumulators[5] << std::endl; } else { std::cout << "\ttoo many patterns to show\n"; } std::cout << std::endl; } __global__ void extend_alloc(unsigned m, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *num_new_emb) { unsigned tid = threadIdx.x; unsigned pos = blockIdx.x * blockDim.x + threadIdx.x; __shared__ IndexT emb[BLOCK_SIZE][MAX_SIZE]; if(pos < m) { IndexT num = 0; emb_list.get_embedding(level, pos, emb[tid]); for (unsigned i = 0; i < level+1; ++i) { IndexT src = emb[tid][i]; IndexT row_begin = graph.edge_begin(src); IndexT row_end = graph.edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph.getEdgeDst(e); if (!is_vertexInduced_automorphism(level+1, emb[tid], i, src, dst, graph)) num ++; } } num_new_emb[pos] = num; } } __global__ void extend_insert(unsigned m, unsigned max_size, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *indices) { unsigned tid = threadIdx.x; unsigned pos = blockIdx.x * blockDim.x + threadIdx.x; __shared__ IndexT emb[BLOCK_SIZE][MAX_SIZE]; if(pos < m) { emb_list.get_embedding(level, pos, emb[tid]); IndexT start = indices[pos]; for (unsigned i = 0; i < level+1; ++i) { IndexT src = emb[tid][i]; IndexT row_begin = graph.edge_begin(src); IndexT row_end = graph.edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph.getEdgeDst(e); if (!is_vertexInduced_automorphism(level+1, emb[tid], i, src, dst, graph)) { if (level == 1 && max_size == 4) emb_list.set_pid(start, find_3motif_pattern_id(i, dst, emb[tid], graph, start)); emb_list.set_idx(level+1, start, pos); emb_list.set_vid(level+1, start++, dst); } } } } } __global__ void aggregate(unsigned m, unsigned level, unsigned npatterns, CSRGraph graph, EmbeddingList emb_list, AccType *accumulators) { unsigned tid = threadIdx.x; unsigned pos = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ typename BlockReduce::TempStorage temp_storage; __shared__ IndexT emb[BLOCK_SIZE][MAX_SIZE]; AccType local_num[6]; for (int i = 0; i < npatterns; i++) local_num[i] = 0; if(pos < m) { unsigned pattern = 0; emb_list.get_embedding(level, pos, emb[tid]); //if (pos == 0) printout_embedding(level, emb[tid]); unsigned n = level+1; assert(n < 4); if (n == 3) pattern = emb_list.get_pid(pos); for (unsigned i = 0; i < n; ++i) { IndexT src = emb[tid][i]; IndexT row_begin = graph.edge_begin(src); IndexT row_end = graph.edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph.getEdgeDst(e); if (!is_vertexInduced_automorphism(n, 
emb[tid], i, src, dst, graph)) { unsigned pid = 1; // 3-chain //if (i == 0 && is_connected(emb[tid][1], dst, graph)) pid = 0; // triangle if (n == 2) pid = find_3motif_pattern_id(i, dst, emb[tid], graph, pos); else pid = find_4motif_pattern_id(n, i, dst, emb[tid], pattern, graph, pos); //printf("pid = %u\n", pid); local_num[pid] += 1; } } } } //AccType block_num; for (int i = 0; i < npatterns; i++) { //block_num = BlockReduce(temp_storage).Sum(local_num[i]); //if(threadIdx.x == 0) atomicAdd(&accumulators[i], block_num); atomicAdd(&accumulators[i], local_num[i]); } } __global__ void clear(AccType *accumulators) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; accumulators[i] = 0; } void parallel_prefix_sum(int n, IndexT *in, IndexT *out) { IndexT total = 0; for (size_t i = 0; i < n; i++) { out[i] = total; total += in[i]; } out[n] = total; } void motif_gpu_solver(std::string fname, unsigned k, std::vector<AccType> &acc, size_t N_CHUNK) { size_t npatterns = acc.size(); AccType *h_accumulators = (AccType *)malloc(sizeof(AccType) * npatterns); for (int i = 0; i < npatterns; i++) h_accumulators[i] = 0; AccType *d_accumulators; CUDA_SAFE_CALL(hipMalloc((void **)&d_accumulators, sizeof(AccType) * npatterns)); hipLaunchKernelGGL(( clear), dim3(1), dim3(npatterns), 0, 0, d_accumulators); CudaTest("clear accumulator failed"); CSRGraph graph_cpu, graph_gpu; graph_cpu.read(fname, false); // read graph into CPU memoryA int m = graph_cpu.get_nnodes(); int nnz = graph_cpu.get_nedges(); graph_cpu.copy_to_gpu(graph_gpu); // copy graph to GPU memory int nthreads = BLOCK_SIZE; int nblocks = DIVIDE_INTO(m, nthreads); printf("Launching CUDA TC solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads); EmbeddingList emb_list; emb_list.init(nnz, k, false); emb_list.init_cpu(&graph_cpu); CUDA_SAFE_CALL(hipDeviceSynchronize()); Timer t; t.Start(); unsigned level = 1; unsigned num_emb = emb_list.size(); while (level < k-2) { IndexT *num_new_emb, *indices; CUDA_SAFE_CALL(hipMalloc((void **)&num_new_emb, sizeof(IndexT) * (num_emb+1))); CUDA_SAFE_CALL(hipMalloc((void **)&indices, sizeof(IndexT) * (num_emb+1))); nblocks = (num_emb-1)/nthreads+1; hipLaunchKernelGGL(( extend_alloc), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, level, graph_gpu, emb_list, num_new_emb); CudaTest("solving extend_alloc failed"); thrust::exclusive_scan(thrust::device, num_new_emb, num_new_emb+num_emb+1, indices); CudaTest("Scan failed"); IndexT new_size; CUDA_SAFE_CALL(hipMemcpy(&new_size, &indices[num_emb], sizeof(IndexT), hipMemcpyDeviceToHost)); assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32 emb_list.add_level(new_size); #ifdef USE_WEDGE //if (level == 1 && max_size == 4) { // is_wedge.resize(emb_list.size()); // std::fill(is_wedge.begin(), is_wedge.end(), 0); //} #endif hipLaunchKernelGGL(( extend_insert), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, k, level, graph_gpu, emb_list, indices); CudaTest("solving extend_insert failed"); std::cout << "Extend_insert Done\n"; num_emb = emb_list.size(); CUDA_SAFE_CALL(hipFree(num_new_emb)); CUDA_SAFE_CALL(hipFree(indices)); level ++; } if (k < 5) { nblocks = (num_emb-1)/nthreads+1; hipLaunchKernelGGL(( aggregate), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, level, npatterns, graph_gpu, emb_list, d_accumulators); CudaTest("solving aggregate failed"); } else { printf("Not supported\n"); } CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); printf("\truntime = %f ms.\n", t.Millisecs()); CUDA_SAFE_CALL(hipMemcpy(h_accumulators, d_accumulators, 
		sizeof(AccType) * npatterns, hipMemcpyDeviceToHost));
	printout_motifs(npatterns, h_accumulators);
	CUDA_SAFE_CALL(hipFree(d_accumulators));
}
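The expansion loop in motif_gpu_solver follows a count, scan, insert pattern: extend_alloc writes one count per current embedding, thrust::exclusive_scan turns those counts into write offsets, extend_insert scatters the new embeddings at those offsets, and indices[num_emb] gives the new total size passed to emb_list.add_level. Below is a minimal host-side sketch of that pattern with plain integers; the variable names are illustrative, and the same thrust calls compile under both the HIP and CUDA builds.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <vector>
#include <cassert>

int main() {
  // Stand-in for num_new_emb: how many new embeddings each of the 4 current
  // embeddings would emit (what extend_alloc computes per thread).
  std::vector<int> h_counts = {2, 0, 3, 1};
  const int num_emb = static_cast<int>(h_counts.size());

  // The solver allocates num_emb + 1 slots so the scan's last output is the total.
  thrust::device_vector<int> counts(num_emb + 1, 0);
  thrust::copy(h_counts.begin(), h_counts.end(), counts.begin());
  thrust::device_vector<int> indices(num_emb + 1);

  // Exclusive scan: indices[i] is where embedding i starts writing its children.
  thrust::exclusive_scan(counts.begin(), counts.end(), indices.begin());

  int new_size = indices[num_emb];  // mirrors the copy of indices[num_emb] back to the host
  assert(new_size == 6);            // emb_list.add_level(new_size) would make room for 6
  return 0;
}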
d48d6a06ed35cfb809f9ab04d38701572b0648aa.cu
// Copyright (c) 2019, Xuhao Chen #include "motif.h" #include "timer.h" #include "cutils.h" #define USE_PID #define USE_SIMPLE #define VERTEX_INDUCED #include "miner.cuh" #include <cub/cub.cuh> #include <thrust/scan.h> #include <thrust/execution_policy.h> typedef cub::BlockReduce<AccType, BLOCK_SIZE> BlockReduce; void printout_motifs(int npatterns, AccType *accumulators) { std::cout << std::endl; if (npatterns == 2) { std::cout << "\ttriangles\t" << accumulators[0] << std::endl; std::cout << "\t3-chains\t" << accumulators[1] << std::endl; } else if (npatterns == 6) { std::cout << "\t4-paths --> " << accumulators[0] << std::endl; std::cout << "\t3-stars --> " << accumulators[1] << std::endl; std::cout << "\t4-cycles --> " << accumulators[2] << std::endl; std::cout << "\ttailed-triangles --> " << accumulators[3] << std::endl; std::cout << "\tdiamonds --> " << accumulators[4] << std::endl; std::cout << "\t4-cliques --> " << accumulators[5] << std::endl; } else { std::cout << "\ttoo many patterns to show\n"; } std::cout << std::endl; } __global__ void extend_alloc(unsigned m, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *num_new_emb) { unsigned tid = threadIdx.x; unsigned pos = blockIdx.x * blockDim.x + threadIdx.x; __shared__ IndexT emb[BLOCK_SIZE][MAX_SIZE]; if(pos < m) { IndexT num = 0; emb_list.get_embedding(level, pos, emb[tid]); for (unsigned i = 0; i < level+1; ++i) { IndexT src = emb[tid][i]; IndexT row_begin = graph.edge_begin(src); IndexT row_end = graph.edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph.getEdgeDst(e); if (!is_vertexInduced_automorphism(level+1, emb[tid], i, src, dst, graph)) num ++; } } num_new_emb[pos] = num; } } __global__ void extend_insert(unsigned m, unsigned max_size, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *indices) { unsigned tid = threadIdx.x; unsigned pos = blockIdx.x * blockDim.x + threadIdx.x; __shared__ IndexT emb[BLOCK_SIZE][MAX_SIZE]; if(pos < m) { emb_list.get_embedding(level, pos, emb[tid]); IndexT start = indices[pos]; for (unsigned i = 0; i < level+1; ++i) { IndexT src = emb[tid][i]; IndexT row_begin = graph.edge_begin(src); IndexT row_end = graph.edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph.getEdgeDst(e); if (!is_vertexInduced_automorphism(level+1, emb[tid], i, src, dst, graph)) { if (level == 1 && max_size == 4) emb_list.set_pid(start, find_3motif_pattern_id(i, dst, emb[tid], graph, start)); emb_list.set_idx(level+1, start, pos); emb_list.set_vid(level+1, start++, dst); } } } } } __global__ void aggregate(unsigned m, unsigned level, unsigned npatterns, CSRGraph graph, EmbeddingList emb_list, AccType *accumulators) { unsigned tid = threadIdx.x; unsigned pos = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ typename BlockReduce::TempStorage temp_storage; __shared__ IndexT emb[BLOCK_SIZE][MAX_SIZE]; AccType local_num[6]; for (int i = 0; i < npatterns; i++) local_num[i] = 0; if(pos < m) { unsigned pattern = 0; emb_list.get_embedding(level, pos, emb[tid]); //if (pos == 0) printout_embedding(level, emb[tid]); unsigned n = level+1; assert(n < 4); if (n == 3) pattern = emb_list.get_pid(pos); for (unsigned i = 0; i < n; ++i) { IndexT src = emb[tid][i]; IndexT row_begin = graph.edge_begin(src); IndexT row_end = graph.edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph.getEdgeDst(e); if (!is_vertexInduced_automorphism(n, emb[tid], i, src, dst, graph)) { unsigned pid = 1; // 3-chain //if (i == 0 && 
is_connected(emb[tid][1], dst, graph)) pid = 0; // triangle if (n == 2) pid = find_3motif_pattern_id(i, dst, emb[tid], graph, pos); else pid = find_4motif_pattern_id(n, i, dst, emb[tid], pattern, graph, pos); //printf("pid = %u\n", pid); local_num[pid] += 1; } } } } //AccType block_num; for (int i = 0; i < npatterns; i++) { //block_num = BlockReduce(temp_storage).Sum(local_num[i]); //if(threadIdx.x == 0) atomicAdd(&accumulators[i], block_num); atomicAdd(&accumulators[i], local_num[i]); } } __global__ void clear(AccType *accumulators) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; accumulators[i] = 0; } void parallel_prefix_sum(int n, IndexT *in, IndexT *out) { IndexT total = 0; for (size_t i = 0; i < n; i++) { out[i] = total; total += in[i]; } out[n] = total; } void motif_gpu_solver(std::string fname, unsigned k, std::vector<AccType> &acc, size_t N_CHUNK) { size_t npatterns = acc.size(); AccType *h_accumulators = (AccType *)malloc(sizeof(AccType) * npatterns); for (int i = 0; i < npatterns; i++) h_accumulators[i] = 0; AccType *d_accumulators; CUDA_SAFE_CALL(cudaMalloc((void **)&d_accumulators, sizeof(AccType) * npatterns)); clear<<<1, npatterns>>>(d_accumulators); CudaTest("clear accumulator failed"); CSRGraph graph_cpu, graph_gpu; graph_cpu.read(fname, false); // read graph into CPU memoryA int m = graph_cpu.get_nnodes(); int nnz = graph_cpu.get_nedges(); graph_cpu.copy_to_gpu(graph_gpu); // copy graph to GPU memory int nthreads = BLOCK_SIZE; int nblocks = DIVIDE_INTO(m, nthreads); printf("Launching CUDA TC solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads); EmbeddingList emb_list; emb_list.init(nnz, k, false); emb_list.init_cpu(&graph_cpu); CUDA_SAFE_CALL(cudaDeviceSynchronize()); Timer t; t.Start(); unsigned level = 1; unsigned num_emb = emb_list.size(); while (level < k-2) { IndexT *num_new_emb, *indices; CUDA_SAFE_CALL(cudaMalloc((void **)&num_new_emb, sizeof(IndexT) * (num_emb+1))); CUDA_SAFE_CALL(cudaMalloc((void **)&indices, sizeof(IndexT) * (num_emb+1))); nblocks = (num_emb-1)/nthreads+1; extend_alloc<<<nblocks, nthreads>>>(num_emb, level, graph_gpu, emb_list, num_new_emb); CudaTest("solving extend_alloc failed"); thrust::exclusive_scan(thrust::device, num_new_emb, num_new_emb+num_emb+1, indices); CudaTest("Scan failed"); IndexT new_size; CUDA_SAFE_CALL(cudaMemcpy(&new_size, &indices[num_emb], sizeof(IndexT), cudaMemcpyDeviceToHost)); assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32 emb_list.add_level(new_size); #ifdef USE_WEDGE //if (level == 1 && max_size == 4) { // is_wedge.resize(emb_list.size()); // std::fill(is_wedge.begin(), is_wedge.end(), 0); //} #endif extend_insert<<<nblocks, nthreads>>>(num_emb, k, level, graph_gpu, emb_list, indices); CudaTest("solving extend_insert failed"); std::cout << "Extend_insert Done\n"; num_emb = emb_list.size(); CUDA_SAFE_CALL(cudaFree(num_new_emb)); CUDA_SAFE_CALL(cudaFree(indices)); level ++; } if (k < 5) { nblocks = (num_emb-1)/nthreads+1; aggregate<<<nblocks, nthreads>>>(num_emb, level, npatterns, graph_gpu, emb_list, d_accumulators); CudaTest("solving aggregate failed"); } else { printf("Not supported\n"); } CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Stop(); printf("\truntime = %f ms.\n", t.Millisecs()); CUDA_SAFE_CALL(cudaMemcpy(h_accumulators, d_accumulators, sizeof(AccType) * npatterns, cudaMemcpyDeviceToHost)); printout_motifs(npatterns, h_accumulators); CUDA_SAFE_CALL(cudaFree(d_accumulators)); }
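aggregate() typedefs cub::BlockReduce but, as its commented-out lines show, currently lets every thread issue its own atomicAdd per pattern. The sketch below shows the block-reduced variant those comments hint at, reduced to a single counter read from an array for brevity; AccType and BLOCK_SIZE come from the project headers, so the definitions here are stand-ins made only so the sketch is self-contained, not the project's types.

#include <cub/cub.cuh>

// Stand-ins for the project's definitions; used only in this sketch.
#define BLOCK_SIZE 256
typedef unsigned long long AccType;
typedef cub::BlockReduce<AccType, BLOCK_SIZE> BlockReduce;

__global__ void block_reduced_accumulate(int n, const AccType *local_counts,
                                         AccType *accumulator) {
  __shared__ typename BlockReduce::TempStorage temp_storage;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  AccType v = (i < n) ? local_counts[i] : 0;
  // Sum across the block in shared memory; the result is valid in thread 0 only.
  AccType block_sum = BlockReduce(temp_storage).Sum(v);
  if (threadIdx.x == 0) {
    atomicAdd(accumulator, block_sum);  // one atomic per block instead of one per thread
  }
}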
8a4fc0118a96f6712fe14975e1750ddd7413bd65.hip
// !!! This is a file automatically generated by hipify!!! //====================================== // // GPU //====================================== #include"stdafx.h" #include"UpSampling_DATA.hpp" #include"UpSampling_FUNC.hpp" #include"UpSampling_Base.h" #include"UpSampling_GPU.cuh" #include"UpSampling_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** */ UpSampling_GPU::UpSampling_GPU(Gravisbell::GUID guid, UpSampling_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : UpSampling_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1)) , layerData (i_layerData) /**< */ , inputBufferCount (0) /**< */ , outputBufferCount (0) /**< */ , cudnnHandle (NULL) , inputTensorDesc (NULL) , outputTensorDesc (NULL) , filterDesc (NULL) , convDesc (NULL) { cudnnCreate(&cudnnHandle); cudnnCreateTensorDescriptor(&inputTensorDesc); cudnnCreateTensorDescriptor(&outputTensorDesc); cudnnCreateFilterDescriptor(&filterDesc); cudnnCreateConvolutionDescriptor(&convDesc); } /** */ UpSampling_GPU::~UpSampling_GPU() { if(convDesc) cudnnDestroyConvolutionDescriptor(convDesc); if(filterDesc) cudnnDestroyFilterDescriptor(filterDesc); if(outputTensorDesc) cudnnDestroyTensorDescriptor(outputTensorDesc); if(inputTensorDesc) cudnnDestroyTensorDescriptor(inputTensorDesc); if(cudnnHandle) cudnnDestroy(cudnnHandle); } //================================ // //================================ /** */ U32 UpSampling_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** . @return 0 */ ErrorCode UpSampling_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // //=========================== /** */ UpSampling_LayerData_Base& UpSampling_GPU::GetLayerData() { return this->layerData; } const UpSampling_LayerData_Base& UpSampling_GPU::GetLayerData()const { return this->layerData; } //================================ // //================================ /** .() @param batchSize . NN. PreProcessLearnLoop. */ ErrorCode UpSampling_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; return ErrorCode::ERROR_CODE_NONE; } /** .() @param batchSize . NN. Calculate. 
*/ ErrorCode UpSampling_GPU::PreProcessCalculate() { // this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; cudnnStatus_t err_cudnn; // S32 dataDim = 1 + 1 + 0; // + + 0 std::vector<S32> dimInput; // std::vector<S32> dimInputStride; // std::vector<S32> dimOutput; std::vector<S32> dimOutputStride; S32 filterDim = 0; // + + std::vector<S32> dimFilter; S32 convDim = 0; // std::vector<S32> dimStride; std::vector<S32> dimDilation; std::vector<S32> dimPadding; if(this->GetInputDataStruct().z > 1) { dataDim = 1 + 1 + 3; dimInput.resize(dataDim); dimInput[0] = this->GetBatchSize(); dimInput[1] = this->GetInputDataStruct().ch; dimInput[2] = this->GetInputDataStruct().z; dimInput[3] = this->GetInputDataStruct().y; dimInput[4] = this->GetInputDataStruct().x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[1] = dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[2] = dimInput[3] * dimInput[4]; dimInputStride[3] = dimInput[4]; dimInputStride[4] = 1; dimOutput.resize(dataDim); dimOutput[0] = this->GetBatchSize(); dimOutput[1] = this->GetOutputDataStruct().ch; dimOutput[2] = this->GetOutputDataStruct().z; dimOutput[3] = this->GetOutputDataStruct().y; dimOutput[4] = this->GetOutputDataStruct().x; dimOutputStride.resize(dataDim); dimOutputStride[0] = dimOutput[1] * dimOutput[2] * dimOutput[3] * dimOutput[4]; dimOutputStride[1] = dimOutput[2] * dimOutput[3] * dimOutput[4]; dimOutputStride[2] = dimOutput[3] * dimOutput[4]; dimOutputStride[3] = dimOutput[4]; dimOutputStride[4] = 1; filterDim = 1 + 1 + 2; // + + 3 dimFilter.resize(filterDim); dimFilter[0] = this->GetOutputDataStruct().ch; dimFilter[1] = this->GetInputDataStruct().ch; dimFilter[2] = this->layerData.layerStructure.UpScale.y; dimFilter[3] = this->layerData.layerStructure.UpScale.x; convDim = 2; // 3 dimPadding.resize(convDim); dimPadding[0] = 0; dimPadding[1] = 0; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimStride.resize(convDim); dimStride[0] = 1; dimStride[1] = 1; } else if(this->GetInputDataStruct().y > 1) { dataDim = 1 + 1 + 2; dimInput.resize(dataDim); dimInput[0] = this->GetBatchSize() * this->GetInputDataStruct().ch; dimInput[1] = 1; dimInput[2] = this->GetInputDataStruct().y; dimInput[3] = this->GetInputDataStruct().x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3]; dimInputStride[1] = dimInput[2] * dimInput[3]; dimInputStride[2] = dimInput[3]; dimInputStride[3] = 1; dimOutput.resize(dataDim); dimOutput[0] = this->GetBatchSize() * this->GetOutputDataStruct().ch; dimOutput[1] = 1; dimOutput[2] = this->GetOutputDataStruct().y; dimOutput[3] = this->GetOutputDataStruct().x; dimOutputStride.resize(dataDim); dimOutputStride[0] = dimOutput[1] * dimOutput[2] * dimOutput[3]; dimOutputStride[1] = dimOutput[2] * dimOutput[3]; dimOutputStride[2] = dimOutput[3]; dimOutputStride[3] = 1; filterDim = 1 + 1 + 2; // + + 3 dimFilter.resize(filterDim); dimFilter[0] = 1; dimFilter[1] = 1; dimFilter[2] = this->layerData.layerStructure.UpScale.y; dimFilter[3] = this->layerData.layerStructure.UpScale.x; convDim = 2; // 2 dimPadding.resize(convDim); dimPadding[0] = 0; dimPadding[1] = 0; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimStride.resize(convDim); dimStride[0] = 
this->layerData.layerStructure.UpScale.y; dimStride[1] = this->layerData.layerStructure.UpScale.x; } else if(this->GetInputDataStruct().x > 1) { dataDim = 1 + 1 + 1; dimInput.resize(dataDim); dimInput[0] = this->GetBatchSize(); dimInput[1] = this->GetInputDataStruct().ch; dimInput[2] = this->GetInputDataStruct().x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2]; dimInputStride[1] = dimInput[2]; dimInputStride[2] = 1; dimOutput.resize(dataDim); dimOutput[0] = this->GetBatchSize(); dimOutput[1] = this->GetOutputDataStruct().ch; dimOutput[2] = this->GetOutputDataStruct().x; dimOutputStride.resize(dataDim); dimOutputStride[0] = dimOutput[1] * dimOutput[2]; dimOutputStride[1] = dimOutput[2]; dimOutputStride[2] = 1; } else { return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; } // CUDNN err_cudnn = cudnnSetTensorNdDescriptor( this->inputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimInput[0], &dimInputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // CUDNN err_cudnn = cudnnSetTensorNdDescriptor( this->outputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimOutput[0], &dimOutputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // err_cudnn = cudnnSetFilterNdDescriptor( this->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filterDim, &dimFilter[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // err_cudnn = cudnnSetConvolutionNdDescriptor( this->convDesc, convDim, &dimPadding[0], &dimStride[0], &dimDilation[0], CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // () err_cudnn = cudnnGetConvolutionForwardAlgorithm( this->cudnnHandle, this->outputTensorDesc, this->filterDesc, this->convDesc, this->inputTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, // 0, // &this->useForwardAlgorithm ); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // () size_t workSpaceSizeByte_forward; err_cudnn = cudnnGetConvolutionForwardWorkspaceSize( this->cudnnHandle, this->outputTensorDesc, this->filterDesc, this->convDesc, this->inputTensorDesc, this->useForwardAlgorithm, &workSpaceSizeByte_forward); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // (-) err_cudnn = cudnnGetConvolutionBackwardDataAlgorithm( this->cudnnHandle, this->filterDesc, this->inputTensorDesc, this->convDesc, this->outputTensorDesc, cudnnConvolutionBwdDataPreference_t::CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, // 0, // &this->useBackwardDataAlgorithm); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // (-) size_t workSpaceSizeByte_backwardData; err_cudnn = cudnnGetConvolutionBackwardDataWorkspaceSize( this->cudnnHandle, this->filterDesc, this->inputTensorDesc, this->convDesc, this->outputTensorDesc, this->useBackwardDataAlgorithm, &workSpaceSizeByte_backwardData); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // this->workSpace.resize(max(workSpaceSizeByte_forward, workSpaceSizeByte_backwardData)); // filter.resize( this->layerData.layerStructure.UpScale.x * this->layerData.layerStructure.UpScale.y * this->layerData.layerStructure.UpScale.z, 0.0f); for(U32 z=0; z<this->layerData.layerStructure.UpScale.z; z++) { U32 zOffset = z * this->layerData.layerStructure.UpScale.y * this->layerData.layerStructure.UpScale.x; for(U32 y=0; y<this->layerData.layerStructure.UpScale.y; y++) { U32 yOffset = y * this->layerData.layerStructure.UpScale.x; for(U32 x=0; x<this->layerData.layerStructure.UpScale.x; x++) { U32 offset = 
zOffset + yOffset + x; switch(this->layerData.layerStructure.PaddingType) { case UpSampling::LayerStructure::PaddingType_value: { filter[offset] = 1.0f; } break; case UpSampling::LayerStructure::PaddingType_zero: { if(z==0 && y==0 && x==0) filter[offset] = 1.0f; else filter[offset] = 0.0f; } break; } } } } return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. */ ErrorCode UpSampling_GPU::PreProcessLoop() { return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** . @param lpInputBuffer . GetInputBufferCount @return 0 */ ErrorCode UpSampling_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer) { cudnnStatus_t err_cudnn; // hipMemset( o_lppOutputBuffer, 0, this->outputBufferCount * this->GetBatchSize() * sizeof(F32)); // { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionBackwardData( this->cudnnHandle, &alpha, this->filterDesc, thrust::raw_pointer_cast(&this->filter[0]), this->inputTensorDesc, i_lppInputBuffer, this->convDesc, this->useBackwardDataAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->outputTensorDesc, o_lppOutputBuffer); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } #ifdef _DEBUG std::vector<F32> lpDebugInputBuffer(this->GetBatchSize() * this->inputBufferCount); hipMemcpy(&lpDebugInputBuffer[0], i_lppInputBuffer, sizeof(F32)*lpDebugInputBuffer.size(), hipMemcpyDeviceToHost); std::vector<F32> lpDebugOutputBuffer(this->GetBatchSize() * this->outputBufferCount); hipMemcpy(&lpDebugOutputBuffer[0], o_lppOutputBuffer, sizeof(F32)*lpDebugOutputBuffer.size(), hipMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } //================================ // //================================ /** .. Calculate. @param o_lppDInputBuffer . [GetBatchSize()][GetInputBufferCount()]. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. */ ErrorCode UpSampling_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { cudnnStatus_t err_cudnn; // if(o_lppDInputBuffer) { // hipMemset( o_lppDInputBuffer, 0, sizeof(F32)*this->inputBufferCount*this->GetBatchSize()); { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionForward( this->cudnnHandle, &alpha, this->outputTensorDesc, i_lppDOutputBuffer, this->filterDesc, thrust::raw_pointer_cast(&this->filter[0]), this->convDesc, this->useForwardAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->inputTensorDesc, o_lppDInputBuffer); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } } return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. */ ErrorCode UpSampling_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer); } } // Gravisbell; } // Layer; } // NeuralNetwork;
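PreProcessCalculate builds a scale-sized filter (all ones for PaddingType_value, a single 1 at the origin for PaddingType_zero) and Calculate_device pushes the input through cudnnConvolutionBackwardData with stride equal to the upscale factor. For the 2-D case that amounts to nearest-neighbour replication (value padding) or zero insertion (zero padding). The standalone kernel below computes the same forward result for the value-padding case; it is only an illustration of what the cuDNN call produces, not the layer's code path, and its name and layout assumptions (single channel, row-major) are mine.

// Illustrative only: each input pixel is replicated into a scaleX x scaleY block,
// which is what backward-data with an all-ones filter and stride = scale yields.
// With the zero-padding filter (single 1 at the origin), only out[y*scaleY][x*scaleX]
// would receive in[y][x] and every other output element would stay 0.
__global__ void upsample2d_nearest(const float *in, float *out,
                                   int inW, int inH, int scaleX, int scaleY) {
  int ox = blockIdx.x * blockDim.x + threadIdx.x;
  int oy = blockIdx.y * blockDim.y + threadIdx.y;
  int outW = inW * scaleX;
  int outH = inH * scaleY;
  if (ox >= outW || oy >= outH) {
    return;
  }
  out[oy * outW + ox] = in[(oy / scaleY) * inW + (ox / scaleX)];
}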
8a4fc0118a96f6712fe14975e1750ddd7413bd65.cu
//====================================== // 畳み込みニューラルネットワークの結合レイヤー // GPU処理用 //====================================== #include"stdafx.h" #include"UpSampling_DATA.hpp" #include"UpSampling_FUNC.hpp" #include"UpSampling_Base.h" #include"UpSampling_GPU.cuh" #include"UpSampling_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** コンストラクタ */ UpSampling_GPU::UpSampling_GPU(Gravisbell::GUID guid, UpSampling_LayerData_GPU& i_layerData, const IODataStruct& i_inputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) : UpSampling_Base (guid, i_inputDataStruct, i_layerData.GetOutputDataStruct(&i_inputDataStruct, 1)) , layerData (i_layerData) /**< レイヤーデータ */ , inputBufferCount (0) /**< 入力バッファ数 */ , outputBufferCount (0) /**< 出力バッファ数 */ , cudnnHandle (NULL) , inputTensorDesc (NULL) , outputTensorDesc (NULL) , filterDesc (NULL) , convDesc (NULL) { cudnnCreate(&cudnnHandle); cudnnCreateTensorDescriptor(&inputTensorDesc); cudnnCreateTensorDescriptor(&outputTensorDesc); cudnnCreateFilterDescriptor(&filterDesc); cudnnCreateConvolutionDescriptor(&convDesc); } /** デストラクタ */ UpSampling_GPU::~UpSampling_GPU() { if(convDesc) cudnnDestroyConvolutionDescriptor(convDesc); if(filterDesc) cudnnDestroyFilterDescriptor(filterDesc); if(outputTensorDesc) cudnnDestroyTensorDescriptor(outputTensorDesc); if(inputTensorDesc) cudnnDestroyTensorDescriptor(inputTensorDesc); if(cudnnHandle) cudnnDestroy(cudnnHandle); } //================================ // 基本処理 //================================ /** レイヤー種別の取得 */ U32 UpSampling_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** 初期化. 各ニューロンの値をランダムに初期化 @return 成功した場合0 */ ErrorCode UpSampling_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // レイヤーデータ関連 //=========================== /** レイヤーデータを取得する */ UpSampling_LayerData_Base& UpSampling_GPU::GetLayerData() { return this->layerData; } const UpSampling_LayerData_Base& UpSampling_GPU::GetLayerData()const { return this->layerData; } //================================ // 演算処理 //================================ /** 演算前処理を実行する.(学習用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はPreProcessLearnLoop以降の処理は実行不可. */ ErrorCode UpSampling_GPU::PreProcessLearn() { ErrorCode errorCode = this->PreProcessCalculate(); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; return ErrorCode::ERROR_CODE_NONE; } /** 演算前処理を実行する.(演算用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はCalculate以降の処理は実行不可. 
*/ ErrorCode UpSampling_GPU::PreProcessCalculate() { // 入力バッファ数を確認 this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // 出力バッファ数を確認 this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; cudnnStatus_t err_cudnn; // 次元数を調べる S32 dataDim = 1 + 1 + 0; // バッチ + チャンネル + 次元0 std::vector<S32> dimInput; // 入力データ構造 std::vector<S32> dimInputStride; // 入力データの各次元ごとのデータ数 std::vector<S32> dimOutput; std::vector<S32> dimOutputStride; S32 filterDim = 0; // フィルタ次元数 入力チャンネル + 出力チャンネル + 次元 std::vector<S32> dimFilter; S32 convDim = 0; // 畳み込み次元数 次元 std::vector<S32> dimStride; std::vector<S32> dimDilation; std::vector<S32> dimPadding; if(this->GetInputDataStruct().z > 1) { dataDim = 1 + 1 + 3; dimInput.resize(dataDim); dimInput[0] = this->GetBatchSize(); dimInput[1] = this->GetInputDataStruct().ch; dimInput[2] = this->GetInputDataStruct().z; dimInput[3] = this->GetInputDataStruct().y; dimInput[4] = this->GetInputDataStruct().x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[1] = dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[2] = dimInput[3] * dimInput[4]; dimInputStride[3] = dimInput[4]; dimInputStride[4] = 1; dimOutput.resize(dataDim); dimOutput[0] = this->GetBatchSize(); dimOutput[1] = this->GetOutputDataStruct().ch; dimOutput[2] = this->GetOutputDataStruct().z; dimOutput[3] = this->GetOutputDataStruct().y; dimOutput[4] = this->GetOutputDataStruct().x; dimOutputStride.resize(dataDim); dimOutputStride[0] = dimOutput[1] * dimOutput[2] * dimOutput[3] * dimOutput[4]; dimOutputStride[1] = dimOutput[2] * dimOutput[3] * dimOutput[4]; dimOutputStride[2] = dimOutput[3] * dimOutput[4]; dimOutputStride[3] = dimOutput[4]; dimOutputStride[4] = 1; filterDim = 1 + 1 + 2; // 入力チャンネル + 出力チャンネル + 次元3 dimFilter.resize(filterDim); dimFilter[0] = this->GetOutputDataStruct().ch; dimFilter[1] = this->GetInputDataStruct().ch; dimFilter[2] = this->layerData.layerStructure.UpScale.y; dimFilter[3] = this->layerData.layerStructure.UpScale.x; convDim = 2; // 次元3 dimPadding.resize(convDim); dimPadding[0] = 0; dimPadding[1] = 0; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimStride.resize(convDim); dimStride[0] = 1; dimStride[1] = 1; } else if(this->GetInputDataStruct().y > 1) { dataDim = 1 + 1 + 2; dimInput.resize(dataDim); dimInput[0] = this->GetBatchSize() * this->GetInputDataStruct().ch; dimInput[1] = 1; dimInput[2] = this->GetInputDataStruct().y; dimInput[3] = this->GetInputDataStruct().x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3]; dimInputStride[1] = dimInput[2] * dimInput[3]; dimInputStride[2] = dimInput[3]; dimInputStride[3] = 1; dimOutput.resize(dataDim); dimOutput[0] = this->GetBatchSize() * this->GetOutputDataStruct().ch; dimOutput[1] = 1; dimOutput[2] = this->GetOutputDataStruct().y; dimOutput[3] = this->GetOutputDataStruct().x; dimOutputStride.resize(dataDim); dimOutputStride[0] = dimOutput[1] * dimOutput[2] * dimOutput[3]; dimOutputStride[1] = dimOutput[2] * dimOutput[3]; dimOutputStride[2] = dimOutput[3]; dimOutputStride[3] = 1; filterDim = 1 + 1 + 2; // 入力チャンネル + 出力チャンネル + 次元3 dimFilter.resize(filterDim); dimFilter[0] = 1; dimFilter[1] = 1; dimFilter[2] = this->layerData.layerStructure.UpScale.y; dimFilter[3] = this->layerData.layerStructure.UpScale.x; convDim = 2; // 次元2 dimPadding.resize(convDim); 
dimPadding[0] = 0; dimPadding[1] = 0; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.UpScale.y; dimStride[1] = this->layerData.layerStructure.UpScale.x; } else if(this->GetInputDataStruct().x > 1) { dataDim = 1 + 1 + 1; dimInput.resize(dataDim); dimInput[0] = this->GetBatchSize(); dimInput[1] = this->GetInputDataStruct().ch; dimInput[2] = this->GetInputDataStruct().x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2]; dimInputStride[1] = dimInput[2]; dimInputStride[2] = 1; dimOutput.resize(dataDim); dimOutput[0] = this->GetBatchSize(); dimOutput[1] = this->GetOutputDataStruct().ch; dimOutput[2] = this->GetOutputDataStruct().x; dimOutputStride.resize(dataDim); dimOutputStride[0] = dimOutput[1] * dimOutput[2]; dimOutputStride[1] = dimOutput[2]; dimOutputStride[2] = 1; } else { return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; } // CUDNNの入力データ構造を設定 err_cudnn = cudnnSetTensorNdDescriptor( this->inputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimInput[0], &dimInputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // CUDNNの出力データ構造を設定 err_cudnn = cudnnSetTensorNdDescriptor( this->outputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimOutput[0], &dimOutputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // フィルタサイズを設定 err_cudnn = cudnnSetFilterNdDescriptor( this->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filterDim, &dimFilter[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 畳み込み処理設定 err_cudnn = cudnnSetConvolutionNdDescriptor( this->convDesc, convDim, &dimPadding[0], &dimStride[0], &dimDilation[0], CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 最速のアルゴリズムを検索する(前方伝播) err_cudnn = cudnnGetConvolutionForwardAlgorithm( this->cudnnHandle, this->outputTensorDesc, this->filterDesc, this->convDesc, this->inputTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, // メモリの使用量無制限で最速のアルゴリズムを調べる 0, // 使用可能なメモリの上限 &this->useForwardAlgorithm ); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 必要なメモリ量を調べる(前方伝播) size_t workSpaceSizeByte_forward; err_cudnn = cudnnGetConvolutionForwardWorkspaceSize( this->cudnnHandle, this->outputTensorDesc, this->filterDesc, this->convDesc, this->inputTensorDesc, this->useForwardAlgorithm, &workSpaceSizeByte_forward); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 最速のアルゴリズムを検索する(後方伝播-データ) err_cudnn = cudnnGetConvolutionBackwardDataAlgorithm( this->cudnnHandle, this->filterDesc, this->inputTensorDesc, this->convDesc, this->outputTensorDesc, cudnnConvolutionBwdDataPreference_t::CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, // メモリの使用量無制限で最速のアルゴリズムを調べる 0, // 使用可能なメモリの上限 &this->useBackwardDataAlgorithm); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 必要なメモリ量を調べる(後方伝播-データ) size_t workSpaceSizeByte_backwardData; err_cudnn = cudnnGetConvolutionBackwardDataWorkspaceSize( this->cudnnHandle, this->filterDesc, this->inputTensorDesc, this->convDesc, this->outputTensorDesc, this->useBackwardDataAlgorithm, &workSpaceSizeByte_backwardData); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 処理用バッファの確保 this->workSpace.resize(max(workSpaceSizeByte_forward, workSpaceSizeByte_backwardData)); // フィルタバッファを作成して初期化 filter.resize( this->layerData.layerStructure.UpScale.x * this->layerData.layerStructure.UpScale.y * this->layerData.layerStructure.UpScale.z, 0.0f); for(U32 
z=0; z<this->layerData.layerStructure.UpScale.z; z++) { U32 zOffset = z * this->layerData.layerStructure.UpScale.y * this->layerData.layerStructure.UpScale.x; for(U32 y=0; y<this->layerData.layerStructure.UpScale.y; y++) { U32 yOffset = y * this->layerData.layerStructure.UpScale.x; for(U32 x=0; x<this->layerData.layerStructure.UpScale.x; x++) { U32 offset = zOffset + yOffset + x; switch(this->layerData.layerStructure.PaddingType) { case UpSampling::LayerStructure::PaddingType_value: { filter[offset] = 1.0f; } break; case UpSampling::LayerStructure::PaddingType_zero: { if(z==0 && y==0 && x==0) filter[offset] = 1.0f; else filter[offset] = 0.0f; } break; } } } } return ErrorCode::ERROR_CODE_NONE; } /** ループの初期化処理.データセットの実行開始前に実行する 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode UpSampling_GPU::PreProcessLoop() { return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** 演算処理を実行する. @param lpInputBuffer 入力データバッファ. GetInputBufferCountで取得した値の要素数が必要 @return 成功した場合0が返る */ ErrorCode UpSampling_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppOutputBuffer) { cudnnStatus_t err_cudnn; // 出力バッファをクリア cudaMemset( o_lppOutputBuffer, 0, this->outputBufferCount * this->GetBatchSize() * sizeof(F32)); // 入力バッファを出力にコピー { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionBackwardData( this->cudnnHandle, &alpha, this->filterDesc, thrust::raw_pointer_cast(&this->filter[0]), this->inputTensorDesc, i_lppInputBuffer, this->convDesc, this->useBackwardDataAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->outputTensorDesc, o_lppOutputBuffer); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } #ifdef _DEBUG std::vector<F32> lpDebugInputBuffer(this->GetBatchSize() * this->inputBufferCount); cudaMemcpy(&lpDebugInputBuffer[0], i_lppInputBuffer, sizeof(F32)*lpDebugInputBuffer.size(), cudaMemcpyDeviceToHost); std::vector<F32> lpDebugOutputBuffer(this->GetBatchSize() * this->outputBufferCount); cudaMemcpy(&lpDebugOutputBuffer[0], o_lppOutputBuffer, sizeof(F32)*lpDebugOutputBuffer.size(), cudaMemcpyDeviceToHost); #endif return ErrorCode::ERROR_CODE_NONE; } //================================ // 学習処理 //================================ /** 入力誤差計算をを実行する.学習せずに入力誤差を取得したい場合に使用する. 入力信号、出力信号は直前のCalculateの値を参照する. @param o_lppDInputBuffer 入力誤差差分格納先レイヤー. [GetBatchSize()の戻り値][GetInputBufferCount()の戻り値]の要素数が必要. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 直前の計算結果を使用する */ ErrorCode UpSampling_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer) { cudnnStatus_t err_cudnn; // 入力誤差計算 if(o_lppDInputBuffer) { // 入力誤差バッファのクリア cudaMemset( o_lppDInputBuffer, 0, sizeof(F32)*this->inputBufferCount*this->GetBatchSize()); { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionForward( this->cudnnHandle, &alpha, this->outputTensorDesc, i_lppDOutputBuffer, this->filterDesc, thrust::raw_pointer_cast(&this->filter[0]), this->convDesc, this->useForwardAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->inputTensorDesc, o_lppDInputBuffer); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } } return ErrorCode::ERROR_CODE_NONE; } /** 学習処理を実行する. 入力信号、出力信号は直前のCalculateの値を参照する. @param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 
	Uses the result of the immediately preceding calculation. */
ErrorCode UpSampling_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer, BATCH_BUFFER_POINTER o_lppDInputBuffer, CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
	return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}

}	// Gravisbell;
}	// Layer;
}	// NeuralNetwork;
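CalculateDInput_device feeds the output-side gradient through cudnnConvolutionForward with the same filter, so with the value-padding (all-ones) filter each input-gradient element becomes the sum of the dOutput values in its scaleX x scaleY block, while with the zero-padding filter it collapses to the block's top-left element. The standalone kernel below illustrates that value-padding backward result; again it is only an illustration under my own layout assumptions, not the layer's code path.

// Illustrative only: the gradient w.r.t. each input element is the sum over the output
// block it was replicated into (all-ones filter, stride = scale). With the zero-padding
// filter the sum would reduce to dOut at (iy*scaleY, ix*scaleX).
__global__ void upsample2d_backward_sum(const float *dOut, float *dIn,
                                        int inW, int inH, int scaleX, int scaleY) {
  int ix = blockIdx.x * blockDim.x + threadIdx.x;
  int iy = blockIdx.y * blockDim.y + threadIdx.y;
  if (ix >= inW || iy >= inH) {
    return;
  }
  int outW = inW * scaleX;
  float sum = 0.0f;
  for (int dy = 0; dy < scaleY; ++dy) {
    for (int dx = 0; dx < scaleX; ++dx) {
      sum += dOut[(iy * scaleY + dy) * outW + (ix * scaleX + dx)];
    }
  }
  dIn[iy * inW + ix] = sum;
}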