Dataset columns (all string-valued; the numbers are the minimum and maximum observed string lengths):
  hip_filename    5 to 84 characters
  hip_content     79 to 9.69M characters
  cuda_filename   4 to 83 characters
  cuda_content    19 to 9.69M characters
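Each row pairs a hipify-generated HIP translation (hip_filename, hip_content) with its original CUDA source (cuda_filename, cuda_content). As a rough illustration of what such a pair contains, here is a minimal sketch written for this card, not an actual dataset row; the scale kernel, the buffer size, and every other name in it are made up for illustration. It shows the kind of mechanical rewrite hipify performs: the runtime API prefix changes, a hip_runtime.h include and a generator banner are added, and the triple-chevron launch becomes a hipLaunchKernelGGL call.

// CUDA side, of the kind stored in cuda_content (illustrative sketch only).
__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per element
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024;
  float* d_x;
  cudaMalloc((void**)&d_x, n * sizeof(float));          // device allocation
  scale<<<(n + 255) / 256, 256>>>(d_x, 2.0f, n);        // triple-chevron launch
  cudaDeviceSynchronize();
  cudaFree(d_x);
  return 0;
}

// Corresponding hipify output, of the kind stored in hip_content (same sketch).
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024;
  float* d_x;
  hipMalloc((void**)&d_x, n * sizeof(float));           // cudaMalloc -> hipMalloc
  hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_x, 2.0f, n);
  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}

The rows that follow show the same pattern at file scale: cudaMalloc/cudaMemcpy become hipMalloc/hipMemcpy, thrust::cuda::par becomes thrust::hip::par, CUDA headers and stream/guard types get their HIP counterparts, and the "// !!! This is a file automatically generated by hipify!!!" banner is prepended to the translated file.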
eced369b68daa95651d9facc80fb84191b5cf35a.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018~2020 XGBoost contributors */ #include <xgboost/logging.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <memory> #include <mutex> #include <utility> #include <vector> #include "device_helpers_hip.cuh" #include "hist_util.h" #include "hist_util_hip.cuh" #include "math.h" // NOLINT #include "quantile.h" #include "categorical.h" #include "xgboost/host_device_vector.h" namespace xgboost { namespace common { constexpr float SketchContainer::kFactor; namespace detail { size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) { double eps = 1.0 / (WQSketch::kFactor * max_bins); size_t dummy_nlevel; size_t num_cuts; WQuantileSketch<bst_float, bst_float>::LimitSizeLevel( num_rows, eps, &dummy_nlevel, &num_cuts); return ::min(num_cuts, num_rows); } size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns, size_t max_bins, size_t nnz) { auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows); auto if_dense = num_columns * per_column; auto result = ::min(nnz, if_dense); return result; } size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz, size_t num_bins, bool with_weights) { size_t peak = 0; // 0. Allocate cut pointer in quantile container by increasing: n_columns + 1 size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 1. Copy and sort: 2 * bytes_per_element * shape total += BytesPerElement(with_weights) * num_rows * num_columns; peak = ::max(peak, total); // 2. Deallocate bytes_per_element * shape due to reusing memory in sort. total -= BytesPerElement(with_weights) * num_rows * num_columns / 2; // 3. Allocate colomn size scan by increasing: n_columns + 1 total += (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 4. Allocate cut pointer by increasing: n_columns + 1 total += (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry); // 6. Deallocate copied entries by reducing: bytes_per_element * shape. peak = ::max(peak, total); total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2; // 7. Deallocate column size scan. peak = ::max(peak, total); total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 8. Deallocate cut size scan. total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 9. Allocate final cut values, min values, cut ptrs: ::min(rows, bins + 1) * // n_columns + n_columns + n_columns + 1 total += ::min(num_rows, num_bins) * num_columns * sizeof(float); total += num_columns * sizeof(std::remove_reference_t<decltype( std::declval<HistogramCuts>().MinValues())>::value_type); total += (num_columns + 1) * sizeof(std::remove_reference_t<decltype( std::declval<HistogramCuts>().Ptrs())>::value_type); peak = ::max(peak, total); return peak; } size_t SketchBatchNumElements(size_t sketch_batch_num_elements, bst_row_t num_rows, bst_feature_t columns, size_t nnz, int device, size_t num_cuts, bool has_weight) { #if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 // device available memory is not accurate when rmm is used. 
return nnz; #endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 if (sketch_batch_num_elements == 0) { auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight); // use up to 80% of available space auto avail = dh::AvailableMemory(device) * 0.8; if (required_memory > avail) { sketch_batch_num_elements = avail / BytesPerElement(has_weight); } else { sketch_batch_num_elements = ::min(num_rows * static_cast<size_t>(columns), nnz); } } return sketch_batch_num_elements; } void SortByWeight(dh::device_vector<float>* weights, dh::device_vector<Entry>* sorted_entries) { // Sort both entries and wegihts. dh::XGBDeviceAllocator<char> alloc; thrust::sort_by_key(thrust::hip::par(alloc), sorted_entries->begin(), sorted_entries->end(), weights->begin(), detail::EntryCompareOp()); // Scan weights dh::XGBCachingDeviceAllocator<char> caching; thrust::inclusive_scan_by_key(thrust::hip::par(caching), sorted_entries->begin(), sorted_entries->end(), weights->begin(), weights->begin(), [=] __device__(const Entry& a, const Entry& b) { return a.index == b.index; }); } void RemoveDuplicatedCategories( int32_t device, MetaInfo const &info, Span<bst_row_t> d_cuts_ptr, dh::device_vector<Entry> *p_sorted_entries, dh::caching_device_vector<size_t> *p_column_sizes_scan) { info.feature_types.SetDevice(device); auto d_feature_types = info.feature_types.ConstDeviceSpan(); CHECK(!d_feature_types.empty()); auto &column_sizes_scan = *p_column_sizes_scan; auto &sorted_entries = *p_sorted_entries; // Removing duplicated entries in categorical features. dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size()); dh::SegmentedUnique(column_sizes_scan.data().get(), column_sizes_scan.data().get() + column_sizes_scan.size(), sorted_entries.begin(), sorted_entries.end(), new_column_scan.data().get(), sorted_entries.begin(), [=] __device__(Entry const &l, Entry const &r) { if (l.index == r.index) { if (IsCat(d_feature_types, l.index)) { return l.fvalue == r.fvalue; } } return false; }); // Renew the column scan and cut scan based on categorical data. auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan); dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size( info.num_col_ + 1); CHECK_EQ(new_column_scan.size(), new_cuts_size.size()); dh::LaunchN( new_column_scan.size(), [=, d_new_cuts_size = dh::ToSpan(new_cuts_size), d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan), d_new_columns_ptr = dh::ToSpan(new_column_scan)] __device__(size_t idx) { d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx]; if (idx == d_new_columns_ptr.size() - 1) { return; } if (IsCat(d_feature_types, idx)) { // Cut size is the same as number of categories in input. d_new_cuts_size[idx] = d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx]; } else { d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx]; } }); // Turn size into ptr. 
thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(), new_cuts_size.cend(), d_cuts_ptr.data()); } } // namespace detail void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page, size_t begin, size_t end, SketchContainer *sketch_container, int num_cuts_per_feature, size_t num_columns) { dh::XGBCachingDeviceAllocator<char> alloc; dh::device_vector<Entry> sorted_entries; if (page.data.DeviceCanRead()) { const auto& device_data = page.data.ConstDevicePointer(); sorted_entries = dh::device_vector<Entry>(device_data + begin, device_data + end); } else { const auto& host_data = page.data.ConstHostVector(); sorted_entries = dh::device_vector<Entry>(host_data.begin() + begin, host_data.begin() + end); } thrust::sort(thrust::hip::par(alloc), sorted_entries.begin(), sorted_entries.end(), detail::EntryCompareOp()); HostDeviceVector<SketchContainer::OffsetT> cuts_ptr; dh::caching_device_vector<size_t> column_sizes_scan; data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN()); auto batch_it = dh::MakeTransformIterator<data::COOTuple>( sorted_entries.data().get(), [] __device__(Entry const &e) -> data::COOTuple { return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size. }); detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature, batch_it, dummy_is_valid, 0, sorted_entries.size(), &cuts_ptr, &column_sizes_scan); auto d_cuts_ptr = cuts_ptr.DeviceSpan(); if (sketch_container->HasCategorical()) { detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, &column_sizes_scan); } auto const& h_cuts_ptr = cuts_ptr.ConstHostVector(); CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size()); // add cuts into sketches sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan), d_cuts_ptr, h_cuts_ptr.back()); sorted_entries.clear(); sorted_entries.shrink_to_fit(); CHECK_EQ(sorted_entries.capacity(), 0); CHECK_NE(cuts_ptr.Size(), 0); } void ProcessWeightedBatch(int device, const SparsePage& page, MetaInfo const& info, size_t begin, size_t end, SketchContainer* sketch_container, int num_cuts_per_feature, size_t num_columns, bool is_ranking, Span<bst_group_t const> d_group_ptr) { auto weights = info.weights_.ConstDeviceSpan(); dh::XGBCachingDeviceAllocator<char> alloc; const auto& host_data = page.data.ConstHostVector(); dh::device_vector<Entry> sorted_entries(host_data.begin() + begin, host_data.begin() + end); // Binary search to assign weights to each element dh::device_vector<float> temp_weights(sorted_entries.size()); auto d_temp_weights = temp_weights.data().get(); page.offset.SetDevice(device); auto row_ptrs = page.offset.ConstDeviceSpan(); size_t base_rowid = page.base_rowid; if (is_ranking) { CHECK_GE(d_group_ptr.size(), 2) << "Must have at least 1 group for ranking."; CHECK_EQ(weights.size(), d_group_ptr.size() - 1) << "Weight size should equal to number of groups."; dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) { size_t element_idx = idx + begin; size_t ridx = dh::SegmentId(row_ptrs, element_idx); bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid); d_temp_weights[idx] = weights[group_idx]; }); } else { dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) { size_t element_idx = idx + begin; size_t ridx = dh::SegmentId(row_ptrs, element_idx); d_temp_weights[idx] = weights[ridx + base_rowid]; }); } detail::SortByWeight(&temp_weights, &sorted_entries); HostDeviceVector<SketchContainer::OffsetT> cuts_ptr; dh::caching_device_vector<size_t> 
column_sizes_scan; data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN()); auto batch_it = dh::MakeTransformIterator<data::COOTuple>( sorted_entries.data().get(), [] __device__(Entry const &e) -> data::COOTuple { return {0, e.index, e.fvalue}; // row_idx is not needed for scaning column size. }); detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature, batch_it, dummy_is_valid, 0, sorted_entries.size(), &cuts_ptr, &column_sizes_scan); auto d_cuts_ptr = cuts_ptr.DeviceSpan(); if (sketch_container->HasCategorical()) { detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, &column_sizes_scan); } auto const& h_cuts_ptr = cuts_ptr.ConstHostVector(); // Extract cuts sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan), d_cuts_ptr, h_cuts_ptr.back(), dh::ToSpan(temp_weights)); sorted_entries.clear(); sorted_entries.shrink_to_fit(); } HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins, size_t sketch_batch_num_elements) { dmat->Info().feature_types.SetDevice(device); dmat->Info().feature_types.ConstDevicePointer(); // pull to device early // Configure batch size based on available memory bool has_weights = dmat->Info().weights_.Size() > 0; size_t num_cuts_per_feature = detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_); sketch_batch_num_elements = detail::SketchBatchNumElements( sketch_batch_num_elements, dmat->Info().num_row_, dmat->Info().num_col_, dmat->Info().num_nonzero_, device, num_cuts_per_feature, has_weights); HistogramCuts cuts; SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_, dmat->Info().num_row_, device); dmat->Info().weights_.SetDevice(device); for (const auto& batch : dmat->GetBatches<SparsePage>()) { size_t batch_nnz = batch.data.Size(); auto const& info = dmat->Info(); for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) { size_t end = ::min(batch_nnz, size_t(begin + sketch_batch_num_elements)); if (has_weights) { bool is_ranking = HostSketchContainer::UseGroup(dmat->Info()); dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(), info.group_ptr_.cend()); ProcessWeightedBatch( device, batch, dmat->Info(), begin, end, &sketch_container, num_cuts_per_feature, dmat->Info().num_col_, is_ranking, dh::ToSpan(groups)); } else { ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container, num_cuts_per_feature, dmat->Info().num_col_); } } } sketch_container.MakeCuts(&cuts); return cuts; } } // namespace common } // namespace xgboost
eced369b68daa95651d9facc80fb84191b5cf35a.cu
/*! * Copyright 2018~2020 XGBoost contributors */ #include <xgboost/logging.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <memory> #include <mutex> #include <utility> #include <vector> #include "device_helpers.cuh" #include "hist_util.h" #include "hist_util.cuh" #include "math.h" // NOLINT #include "quantile.h" #include "categorical.h" #include "xgboost/host_device_vector.h" namespace xgboost { namespace common { constexpr float SketchContainer::kFactor; namespace detail { size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) { double eps = 1.0 / (WQSketch::kFactor * max_bins); size_t dummy_nlevel; size_t num_cuts; WQuantileSketch<bst_float, bst_float>::LimitSizeLevel( num_rows, eps, &dummy_nlevel, &num_cuts); return std::min(num_cuts, num_rows); } size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns, size_t max_bins, size_t nnz) { auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows); auto if_dense = num_columns * per_column; auto result = std::min(nnz, if_dense); return result; } size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz, size_t num_bins, bool with_weights) { size_t peak = 0; // 0. Allocate cut pointer in quantile container by increasing: n_columns + 1 size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 1. Copy and sort: 2 * bytes_per_element * shape total += BytesPerElement(with_weights) * num_rows * num_columns; peak = std::max(peak, total); // 2. Deallocate bytes_per_element * shape due to reusing memory in sort. total -= BytesPerElement(with_weights) * num_rows * num_columns / 2; // 3. Allocate colomn size scan by increasing: n_columns + 1 total += (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 4. Allocate cut pointer by increasing: n_columns + 1 total += (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry); // 6. Deallocate copied entries by reducing: bytes_per_element * shape. peak = std::max(peak, total); total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2; // 7. Deallocate column size scan. peak = std::max(peak, total); total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 8. Deallocate cut size scan. total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT); // 9. Allocate final cut values, min values, cut ptrs: std::min(rows, bins + 1) * // n_columns + n_columns + n_columns + 1 total += std::min(num_rows, num_bins) * num_columns * sizeof(float); total += num_columns * sizeof(std::remove_reference_t<decltype( std::declval<HistogramCuts>().MinValues())>::value_type); total += (num_columns + 1) * sizeof(std::remove_reference_t<decltype( std::declval<HistogramCuts>().Ptrs())>::value_type); peak = std::max(peak, total); return peak; } size_t SketchBatchNumElements(size_t sketch_batch_num_elements, bst_row_t num_rows, bst_feature_t columns, size_t nnz, int device, size_t num_cuts, bool has_weight) { #if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 // device available memory is not accurate when rmm is used. 
return nnz; #endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1 if (sketch_batch_num_elements == 0) { auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight); // use up to 80% of available space auto avail = dh::AvailableMemory(device) * 0.8; if (required_memory > avail) { sketch_batch_num_elements = avail / BytesPerElement(has_weight); } else { sketch_batch_num_elements = std::min(num_rows * static_cast<size_t>(columns), nnz); } } return sketch_batch_num_elements; } void SortByWeight(dh::device_vector<float>* weights, dh::device_vector<Entry>* sorted_entries) { // Sort both entries and wegihts. dh::XGBDeviceAllocator<char> alloc; thrust::sort_by_key(thrust::cuda::par(alloc), sorted_entries->begin(), sorted_entries->end(), weights->begin(), detail::EntryCompareOp()); // Scan weights dh::XGBCachingDeviceAllocator<char> caching; thrust::inclusive_scan_by_key(thrust::cuda::par(caching), sorted_entries->begin(), sorted_entries->end(), weights->begin(), weights->begin(), [=] __device__(const Entry& a, const Entry& b) { return a.index == b.index; }); } void RemoveDuplicatedCategories( int32_t device, MetaInfo const &info, Span<bst_row_t> d_cuts_ptr, dh::device_vector<Entry> *p_sorted_entries, dh::caching_device_vector<size_t> *p_column_sizes_scan) { info.feature_types.SetDevice(device); auto d_feature_types = info.feature_types.ConstDeviceSpan(); CHECK(!d_feature_types.empty()); auto &column_sizes_scan = *p_column_sizes_scan; auto &sorted_entries = *p_sorted_entries; // Removing duplicated entries in categorical features. dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size()); dh::SegmentedUnique(column_sizes_scan.data().get(), column_sizes_scan.data().get() + column_sizes_scan.size(), sorted_entries.begin(), sorted_entries.end(), new_column_scan.data().get(), sorted_entries.begin(), [=] __device__(Entry const &l, Entry const &r) { if (l.index == r.index) { if (IsCat(d_feature_types, l.index)) { return l.fvalue == r.fvalue; } } return false; }); // Renew the column scan and cut scan based on categorical data. auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan); dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size( info.num_col_ + 1); CHECK_EQ(new_column_scan.size(), new_cuts_size.size()); dh::LaunchN( new_column_scan.size(), [=, d_new_cuts_size = dh::ToSpan(new_cuts_size), d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan), d_new_columns_ptr = dh::ToSpan(new_column_scan)] __device__(size_t idx) { d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx]; if (idx == d_new_columns_ptr.size() - 1) { return; } if (IsCat(d_feature_types, idx)) { // Cut size is the same as number of categories in input. d_new_cuts_size[idx] = d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx]; } else { d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx]; } }); // Turn size into ptr. 
thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(), new_cuts_size.cend(), d_cuts_ptr.data()); } } // namespace detail void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page, size_t begin, size_t end, SketchContainer *sketch_container, int num_cuts_per_feature, size_t num_columns) { dh::XGBCachingDeviceAllocator<char> alloc; dh::device_vector<Entry> sorted_entries; if (page.data.DeviceCanRead()) { const auto& device_data = page.data.ConstDevicePointer(); sorted_entries = dh::device_vector<Entry>(device_data + begin, device_data + end); } else { const auto& host_data = page.data.ConstHostVector(); sorted_entries = dh::device_vector<Entry>(host_data.begin() + begin, host_data.begin() + end); } thrust::sort(thrust::cuda::par(alloc), sorted_entries.begin(), sorted_entries.end(), detail::EntryCompareOp()); HostDeviceVector<SketchContainer::OffsetT> cuts_ptr; dh::caching_device_vector<size_t> column_sizes_scan; data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN()); auto batch_it = dh::MakeTransformIterator<data::COOTuple>( sorted_entries.data().get(), [] __device__(Entry const &e) -> data::COOTuple { return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size. }); detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature, batch_it, dummy_is_valid, 0, sorted_entries.size(), &cuts_ptr, &column_sizes_scan); auto d_cuts_ptr = cuts_ptr.DeviceSpan(); if (sketch_container->HasCategorical()) { detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, &column_sizes_scan); } auto const& h_cuts_ptr = cuts_ptr.ConstHostVector(); CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size()); // add cuts into sketches sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan), d_cuts_ptr, h_cuts_ptr.back()); sorted_entries.clear(); sorted_entries.shrink_to_fit(); CHECK_EQ(sorted_entries.capacity(), 0); CHECK_NE(cuts_ptr.Size(), 0); } void ProcessWeightedBatch(int device, const SparsePage& page, MetaInfo const& info, size_t begin, size_t end, SketchContainer* sketch_container, int num_cuts_per_feature, size_t num_columns, bool is_ranking, Span<bst_group_t const> d_group_ptr) { auto weights = info.weights_.ConstDeviceSpan(); dh::XGBCachingDeviceAllocator<char> alloc; const auto& host_data = page.data.ConstHostVector(); dh::device_vector<Entry> sorted_entries(host_data.begin() + begin, host_data.begin() + end); // Binary search to assign weights to each element dh::device_vector<float> temp_weights(sorted_entries.size()); auto d_temp_weights = temp_weights.data().get(); page.offset.SetDevice(device); auto row_ptrs = page.offset.ConstDeviceSpan(); size_t base_rowid = page.base_rowid; if (is_ranking) { CHECK_GE(d_group_ptr.size(), 2) << "Must have at least 1 group for ranking."; CHECK_EQ(weights.size(), d_group_ptr.size() - 1) << "Weight size should equal to number of groups."; dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) { size_t element_idx = idx + begin; size_t ridx = dh::SegmentId(row_ptrs, element_idx); bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid); d_temp_weights[idx] = weights[group_idx]; }); } else { dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) { size_t element_idx = idx + begin; size_t ridx = dh::SegmentId(row_ptrs, element_idx); d_temp_weights[idx] = weights[ridx + base_rowid]; }); } detail::SortByWeight(&temp_weights, &sorted_entries); HostDeviceVector<SketchContainer::OffsetT> cuts_ptr; dh::caching_device_vector<size_t> 
column_sizes_scan; data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN()); auto batch_it = dh::MakeTransformIterator<data::COOTuple>( sorted_entries.data().get(), [] __device__(Entry const &e) -> data::COOTuple { return {0, e.index, e.fvalue}; // row_idx is not needed for scaning column size. }); detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature, batch_it, dummy_is_valid, 0, sorted_entries.size(), &cuts_ptr, &column_sizes_scan); auto d_cuts_ptr = cuts_ptr.DeviceSpan(); if (sketch_container->HasCategorical()) { detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, &column_sizes_scan); } auto const& h_cuts_ptr = cuts_ptr.ConstHostVector(); // Extract cuts sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan), d_cuts_ptr, h_cuts_ptr.back(), dh::ToSpan(temp_weights)); sorted_entries.clear(); sorted_entries.shrink_to_fit(); } HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins, size_t sketch_batch_num_elements) { dmat->Info().feature_types.SetDevice(device); dmat->Info().feature_types.ConstDevicePointer(); // pull to device early // Configure batch size based on available memory bool has_weights = dmat->Info().weights_.Size() > 0; size_t num_cuts_per_feature = detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_); sketch_batch_num_elements = detail::SketchBatchNumElements( sketch_batch_num_elements, dmat->Info().num_row_, dmat->Info().num_col_, dmat->Info().num_nonzero_, device, num_cuts_per_feature, has_weights); HistogramCuts cuts; SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_, dmat->Info().num_row_, device); dmat->Info().weights_.SetDevice(device); for (const auto& batch : dmat->GetBatches<SparsePage>()) { size_t batch_nnz = batch.data.Size(); auto const& info = dmat->Info(); for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) { size_t end = std::min(batch_nnz, size_t(begin + sketch_batch_num_elements)); if (has_weights) { bool is_ranking = HostSketchContainer::UseGroup(dmat->Info()); dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(), info.group_ptr_.cend()); ProcessWeightedBatch( device, batch, dmat->Info(), begin, end, &sketch_container, num_cuts_per_feature, dmat->Info().num_col_, is_ranking, dh::ToSpan(groups)); } else { ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container, num_cuts_per_feature, dmat->Info().num_col_); } } } sketch_container.MakeCuts(&cuts); return cuts; } } // namespace common } // namespace xgboost
58351ac8102856859276910830997a01f8cd7fd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef ASSEMBLYSHAREDNZCUDA_CU #define ASSEMBLYSHAREDNZCUDA_CU #include "../General.cu" #include "../Problem.h" #define NDOF 3 // Node degrees of freedom #define NPE 4 // Nodes per element #define VNPE 4 // Corner nodes per element #define NDIM 3 // Number of dimensions #define EDOF (NPE*NDOF) // Element degrees of freedom template <int TYPE, int BLOCK_SIZE, typename T> __global__ void assembleSharedNZ( T* coord, T* force, int* scatterPartPtr, int* scatterArray, int* eNumPart, int* suppPtr, T* suppData, int* redPartPtr, int* redList, T* KF ) { // Prefetch partition pointers extern __shared__ T sMem[]; __shared__ int sPtr[6]; int tid = threadIdx.x; // Prefetch pointers if( tid <= 1 ) { sPtr[tid] = scatterPartPtr[blockIdx.x + tid]; sPtr[2+tid] = redPartPtr[blockIdx.x + tid]; } if( tid == 2 ) { sPtr[4] = eNumPart[blockIdx.x]; } if( tid == 3 ) { sPtr[5] = suppPtr[blockIdx.x]; } __syncthreads(); // Scatter nodal data to shared memory // Prefetch element coord and BC data scatterXF3<BLOCK_SIZE>( tid + sPtr[0], sPtr[1], coord, force, sMem, scatterArray ); __syncthreads(); // Compute the element data and store in shared mem int numE = sPtr[4]; T* sE = sMem + tid * ((EDOF*(EDOF+3))/2); suppData += sPtr[5] + tid; while( tid < numE ) { const T x1 = sE[ 0], y1 = sE[ 1], z1 = sE[ 2]; const T bx1 = sE[ 3], by1 = sE[ 4], bz1 = sE[ 5]; const T x2 = sE[ 6], y2 = sE[ 7], z2 = sE[ 8]; const T bx2 = sE[ 9], by2 = sE[10], bz2 = sE[11]; const T x3 = sE[12], y3 = sE[13], z3 = sE[14]; const T bx3 = sE[15], by3 = sE[16], bz3 = sE[17]; const T x4 = sE[18], y4 = sE[19], z4 = sE[20]; const T bx4 = sE[21], by4 = sE[22], bz4 = sE[23]; const T Jinv11 = suppData[0*BLOCK_SIZE]; const T Jinv12 = suppData[1*BLOCK_SIZE]; const T Jinv13 = suppData[2*BLOCK_SIZE]; const T Jinv22 = suppData[3*BLOCK_SIZE]; const T Jinv23 = suppData[4*BLOCK_SIZE]; const T Jinv33 = suppData[5*BLOCK_SIZE]; Problem<T>::Tetrahedral<TYPE,1>( x1, y1, z1, bx1, by1, bz1, x2, y2, z2, bx2, by2, bz2, x3, y3, z3, bx3, by3, bz3, x4, y4, z4, bx4, by4, bz4, Jinv11, Jinv12, Jinv13, Jinv22, Jinv23, Jinv33, sE ); tid += BLOCK_SIZE; sE += BLOCK_SIZE * ((EDOF*(EDOF+3))/2); suppData += BLOCK_SIZE * 6; } __syncthreads(); // Assemble reduce<BLOCK_SIZE>(threadIdx.x + sPtr[2], sPtr[3], sMem, KF, redList ); } #endif
58351ac8102856859276910830997a01f8cd7fd4.cu
#ifndef ASSEMBLYSHAREDNZCUDA_CU #define ASSEMBLYSHAREDNZCUDA_CU #include "../General.cu" #include "../Problem.h" #define NDOF 3 // Node degrees of freedom #define NPE 4 // Nodes per element #define VNPE 4 // Corner nodes per element #define NDIM 3 // Number of dimensions #define EDOF (NPE*NDOF) // Element degrees of freedom template <int TYPE, int BLOCK_SIZE, typename T> __global__ void assembleSharedNZ( T* coord, T* force, int* scatterPartPtr, int* scatterArray, int* eNumPart, int* suppPtr, T* suppData, int* redPartPtr, int* redList, T* KF ) { // Prefetch partition pointers extern __shared__ T sMem[]; __shared__ int sPtr[6]; int tid = threadIdx.x; // Prefetch pointers if( tid <= 1 ) { sPtr[tid] = scatterPartPtr[blockIdx.x + tid]; sPtr[2+tid] = redPartPtr[blockIdx.x + tid]; } if( tid == 2 ) { sPtr[4] = eNumPart[blockIdx.x]; } if( tid == 3 ) { sPtr[5] = suppPtr[blockIdx.x]; } __syncthreads(); // Scatter nodal data to shared memory // Prefetch element coord and BC data scatterXF3<BLOCK_SIZE>( tid + sPtr[0], sPtr[1], coord, force, sMem, scatterArray ); __syncthreads(); // Compute the element data and store in shared mem int numE = sPtr[4]; T* sE = sMem + tid * ((EDOF*(EDOF+3))/2); suppData += sPtr[5] + tid; while( tid < numE ) { const T x1 = sE[ 0], y1 = sE[ 1], z1 = sE[ 2]; const T bx1 = sE[ 3], by1 = sE[ 4], bz1 = sE[ 5]; const T x2 = sE[ 6], y2 = sE[ 7], z2 = sE[ 8]; const T bx2 = sE[ 9], by2 = sE[10], bz2 = sE[11]; const T x3 = sE[12], y3 = sE[13], z3 = sE[14]; const T bx3 = sE[15], by3 = sE[16], bz3 = sE[17]; const T x4 = sE[18], y4 = sE[19], z4 = sE[20]; const T bx4 = sE[21], by4 = sE[22], bz4 = sE[23]; const T Jinv11 = suppData[0*BLOCK_SIZE]; const T Jinv12 = suppData[1*BLOCK_SIZE]; const T Jinv13 = suppData[2*BLOCK_SIZE]; const T Jinv22 = suppData[3*BLOCK_SIZE]; const T Jinv23 = suppData[4*BLOCK_SIZE]; const T Jinv33 = suppData[5*BLOCK_SIZE]; Problem<T>::Tetrahedral<TYPE,1>( x1, y1, z1, bx1, by1, bz1, x2, y2, z2, bx2, by2, bz2, x3, y3, z3, bx3, by3, bz3, x4, y4, z4, bx4, by4, bz4, Jinv11, Jinv12, Jinv13, Jinv22, Jinv23, Jinv33, sE ); tid += BLOCK_SIZE; sE += BLOCK_SIZE * ((EDOF*(EDOF+3))/2); suppData += BLOCK_SIZE * 6; } __syncthreads(); // Assemble reduce<BLOCK_SIZE>(threadIdx.x + sPtr[2], sPtr[3], sMem, KF, redList ); } #endif
6cae9e6e11a3589d018ea7e05115a30985455065.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<sys/time.h> #define NUM 10000000 #define CUDA_ERROR_EXIT(str) do{\ hipError_t err = hipGetLastError();\ if( err != hipSuccess){\ printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\ exit(-1);\ }\ }while(0); #define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec)) struct num_array{ double num1; double num2; double result; }; __device__ void function(struct num_array *a) { double square = a ->num1 * a->num1 + a->num2 * a->num2 + 2 * a->num1 * a->num2; a->result = log(square)/sin(square); return; } __global__ void calculate(char *mem, int num, int rows) { int thread_id = threadIdx.x + rows * threadIdx.y;//flattened thread id value;; int i = blockDim.x * blockDim.y * blockIdx.x + thread_id; // int i_x = blockDim.x * blockIdx.x + threadIdx.x; // int i_y = blockDim.x * blockIdx.x + threadIdx.y; // int i = i_x + rows * i_y; // printf("%d %d %d\n", i, blockDim.y, blockIdx.y); //why blockDim.y is not zero?? if(i >= num) return; struct num_array *a = (struct num_array *)(mem + (i * 3 * sizeof(double))); function(a); } int main(int argc, char **argv) { struct timeval start, end, t_start, t_end; int i; struct num_array *pa; char *ptr; char *sptr; char *gpu_mem; unsigned long num = NUM; int rows, cols; /*Default value of num from MACRO*/ int blocks; num = atoi(argv[1]); rows = atoi(argv[2]); cols = atoi(argv[3]); /* Allocate host (CPU) memory and initialize*/ ptr = (char *)malloc(num * 3 * sizeof(double)); sptr = ptr; for(i=0; i<num; ++i){ pa = (struct num_array *) sptr; pa->num1 = (double) i + (double) i * 0.1; pa->num2 = pa->num1 + 1.0; sptr += 3 * sizeof(double); } gettimeofday(&t_start, NULL); /* Allocate GPU memory and copy from CPU --> GPU*/ hipMalloc(&gpu_mem, num * 3 * sizeof(double)); CUDA_ERROR_EXIT("hipMalloc"); hipMemcpy(gpu_mem, ptr, num * 3 * sizeof(double) , hipMemcpyHostToDevice); CUDA_ERROR_EXIT("hipMemcpy"); gettimeofday(&start, NULL); blocks = num /(rows * cols); if(num % (rows * cols)) ++blocks; dim3 threadsPerBlock(rows, cols); hipLaunchKernelGGL(( calculate), dim3(blocks), dim3(threadsPerBlock) , 0, 0, gpu_mem, num, rows); CUDA_ERROR_EXIT("kernel invocation"); gettimeofday(&end, NULL); /* Copy back result*/ hipMemcpy(ptr, gpu_mem, num * 3 * sizeof(double) , hipMemcpyDeviceToHost); CUDA_ERROR_EXIT("memcpy"); gettimeofday(&t_end, NULL); printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end)); hipFree(gpu_mem); sptr = ptr; /*Print the last element for sanity check*/ pa = (struct num_array *) (sptr + (num -1)*3*sizeof(double)); printf("num1=%f num2=%f result=%f\n", pa->num1, pa->num2, pa->result); free(ptr); }
6cae9e6e11a3589d018ea7e05115a30985455065.cu
#include<stdio.h> #include<stdlib.h> #include<sys/time.h> #define NUM 10000000 #define CUDA_ERROR_EXIT(str) do{\ cudaError err = cudaGetLastError();\ if( err != cudaSuccess){\ printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\ exit(-1);\ }\ }while(0); #define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec)) struct num_array{ double num1; double num2; double result; }; __device__ void function(struct num_array *a) { double square = a ->num1 * a->num1 + a->num2 * a->num2 + 2 * a->num1 * a->num2; a->result = log(square)/sin(square); return; } __global__ void calculate(char *mem, int num, int rows) { int thread_id = threadIdx.x + rows * threadIdx.y;//flattened thread id value;; int i = blockDim.x * blockDim.y * blockIdx.x + thread_id; // int i_x = blockDim.x * blockIdx.x + threadIdx.x; // int i_y = blockDim.x * blockIdx.x + threadIdx.y; // int i = i_x + rows * i_y; // printf("%d %d %d\n", i, blockDim.y, blockIdx.y); //why blockDim.y is not zero?? if(i >= num) return; struct num_array *a = (struct num_array *)(mem + (i * 3 * sizeof(double))); function(a); } int main(int argc, char **argv) { struct timeval start, end, t_start, t_end; int i; struct num_array *pa; char *ptr; char *sptr; char *gpu_mem; unsigned long num = NUM; int rows, cols; /*Default value of num from MACRO*/ int blocks; num = atoi(argv[1]); rows = atoi(argv[2]); cols = atoi(argv[3]); /* Allocate host (CPU) memory and initialize*/ ptr = (char *)malloc(num * 3 * sizeof(double)); sptr = ptr; for(i=0; i<num; ++i){ pa = (struct num_array *) sptr; pa->num1 = (double) i + (double) i * 0.1; pa->num2 = pa->num1 + 1.0; sptr += 3 * sizeof(double); } gettimeofday(&t_start, NULL); /* Allocate GPU memory and copy from CPU --> GPU*/ cudaMalloc(&gpu_mem, num * 3 * sizeof(double)); CUDA_ERROR_EXIT("cudaMalloc"); cudaMemcpy(gpu_mem, ptr, num * 3 * sizeof(double) , cudaMemcpyHostToDevice); CUDA_ERROR_EXIT("cudaMemcpy"); gettimeofday(&start, NULL); blocks = num /(rows * cols); if(num % (rows * cols)) ++blocks; dim3 threadsPerBlock(rows, cols); calculate<<<blocks, threadsPerBlock >>>(gpu_mem, num, rows); CUDA_ERROR_EXIT("kernel invocation"); gettimeofday(&end, NULL); /* Copy back result*/ cudaMemcpy(ptr, gpu_mem, num * 3 * sizeof(double) , cudaMemcpyDeviceToHost); CUDA_ERROR_EXIT("memcpy"); gettimeofday(&t_end, NULL); printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end)); cudaFree(gpu_mem); sptr = ptr; /*Print the last element for sanity check*/ pa = (struct num_array *) (sptr + (num -1)*3*sizeof(double)); printf("num1=%f num2=%f result=%f\n", pa->num1, pa->num2, pa->result); free(ptr); }
48d5ecf244b5fab3dd7e8443145e0b6dec10bf89.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/Copy.h> namespace { using namespace at; using namespace at::cuda; // Copy operator for the pointwise apply kernel template <typename dst_T, typename src_T> struct CopyOp { static void apply(Tensor& dst, const Tensor& src) { CUDA_tensor_apply2<dst_T, src_T>( dst, src, [] __device__(dst_T & dst_val, const src_T& src_val) { #if __CUDA_ARCH__ >= 350 dst_val = static_cast<dst_T>( static_cast<native::inter_copy_type_t<dst_T>>(__ldg(&src_val))); #else dst_val = static_cast<dst_T>(static_cast<native::inter_copy_type_t<dst_T>>(src_val)); #endif }); } }; // device-to-device copy, does type conversion template <typename dst_T, typename src_T> void copy_device_to_device(Tensor& dst, const Tensor& src) { auto numel = dst.numel(); if (dst.is_same(src) || numel == 0) { return; } // We can memcpy the memory if: // -both tensors are contiguous; or, // -there is only one element to copy; or, // -FIXME: if both tensors have matching size and stride arrays, and no // holes within (in other words, there is some permutation that can be applied // to the size/strides such that the resulting tensor is // contiguous). // -AND: both tensors have the same type. bool same_type = std::is_same<dst_T, src_T>::value; bool memcpy_eligible = ((src.is_contiguous() && dst.is_contiguous()) || (numel == 1)) && same_type; Device src_device = src.device(); Device dst_device = dst.device(); HIPGuardMasqueradingAsCUDA device_guard(src_device); // Try to enable p2p access. This also handles the case src_device == // dst_device. bool p2pEnabled = THCState_getPeerToPeerAccess( globalContext().getTHCState(), src_device.index(), dst_device.index()); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // hipMemcpyAsync on the default stream. HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { // Perform the copy AT_CUDA_CHECK(hipMemcpyAsync( dst.data<dst_T>(), src.data<src_T>(), numel * sizeof(dst_T), hipMemcpyDeviceToDevice, copy_stream)); } else { // Non-contiguous copy or a type-conversion copy // We avoid creating temporary memory copies if possible. // If both src and dst are on the same device, or if they are on // different devices and p2p access is enabled, perform the copy // by a pointwise copy kernel. 
// Otherwise, we'll have to make contiguous (which will in fact // invoke copy() again), and then perform the copy. // FIXME: might want to consider only running the pointwise kernel // if both src and dst innermost dimensions are contiguous. If // they are not, then taking the hit of the memory allocation/free // might be worth it to avoid non-coalesced reads or writes. if (p2pEnabled) { CopyOp<dst_T, src_T>::apply(dst, src); } else { // GPUs can't access each other directly, but the tensors // involved are non-contiguous and/or are different types. // Make sure the src is contiguous and in the same type as dst Tensor src_contig; if (same_type) { src_contig = src.contiguous(); } else { // Types are different // Copy into the new format, contiguous, on the source device src_contig = at::empty_like(dst, src.options().dtype(dst.dtype())); CopyOp<dst_T, src_T>::apply(src_contig, src); } // Make sure the dst is contiguous device_guard.set_device(dst_device); Tensor dst_contig = dst.contiguous(); // Now, we are ready for a cross-device memcpy of contiguous // data, of the same layout and type device_guard.set_device(src_device); AT_CUDA_CHECK(hipMemcpyAsync( dst_contig.data<dst_T>(), src_contig.data<dst_T>(), numel * sizeof(dst_T), hipMemcpyDeviceToDevice, copy_stream)); if (!dst.is_contiguous()) { copy_device_to_device<dst_T, dst_T>(dst, dst_contig); } } } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index())); } AT_CUDA_CHECK(hipGetLastError()); } void copy_from_cpu(Tensor& dst, const Tensor& src) { Tensor dst_contig = dst.contiguous(); Tensor src_contig = src.contiguous(); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); AT_CUDA_CHECK(hipMemcpyAsync( dst_contig.data_ptr(), src_contig.data_ptr(), src.numel() * src.dtype().itemsize(), hipMemcpyHostToDevice, stream)); AT_CUDA_CHECK(hipStreamSynchronize(stream)); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "copy_from_cpu", [&]() { copy_device_to_device<scalar_t, scalar_t>(dst, dst_contig); }); } void copy_to_cpu(Tensor& dst, const Tensor& src) { Tensor dst_contig = dst.contiguous(); Tensor src_contig = src.contiguous(); HIPGuardMasqueradingAsCUDA device_guard(src.device()); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); AT_CUDA_CHECK(hipMemcpyAsync( dst_contig.data_ptr(), src_contig.data_ptr(), src.numel() * src.dtype().itemsize(), hipMemcpyDeviceToHost, stream)); AT_CUDA_CHECK(hipStreamSynchronize(stream)); _copy_same_type_(dst, dst_contig); } void copy_from_cpu_async_(Tensor& dst, const Tensor& src) { AT_CHECK(dst.is_contiguous(), "Target tensor must be contiguous."); AT_CHECK(src.is_contiguous(), "Source tensor must be contiguous."); if (dst.numel() == 0) { return; } HIPGuardMasqueradingAsCUDA device_guard(dst.device()); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "copy_from_cpu_async", [&]() { AT_CUDA_CHECK(hipMemcpyAsync( dst.data<scalar_t>(), src.data<scalar_t>(), src.numel() * sizeof(scalar_t), hipMemcpyHostToDevice, stream)); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent( src.storage().data<scalar_t>(), stream)); }); } void copy_to_cpu_async_(Tensor& dst, 
const Tensor& src) { AT_CHECK(dst.is_contiguous(), "Target tensor must be contiguous."); AT_CHECK(src.is_contiguous(), "Source tensor must be contiguous."); if (dst.numel() == 0) { return; } HIPGuardMasqueradingAsCUDA device_guard(src.device()); HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "copy_to_cpu_async", [&]() { AT_CUDA_CHECK(hipMemcpyAsync( dst.data<scalar_t>(), src.data<scalar_t>(), src.numel() * sizeof(scalar_t), hipMemcpyDeviceToHost, stream)); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent( src.storage().data<scalar_t>(), stream)); }); } template <typename dst_T> void _copy__cuda(Tensor& dst, const Tensor& src, bool non_blocking) { AT_CHECK(dst.numel() == src.numel(), "sizes do not match"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "_copy__cuda", [&]() { if (dst.is_cuda() && src.is_cuda()) { copy_device_to_device<dst_T, scalar_t>(dst, src); } else if (dst.is_cuda()) { if (std::is_same<dst_T, scalar_t>::value) { if (non_blocking) { copy_from_cpu_async_(dst, src); } else { copy_from_cpu(dst, src); } } else { // Do a dtype converting copy on the CPU, then copy to device Tensor srcf = at::empty_like(src, src.options().dtype(dst.dtype())); s_copy_(srcf, src); copy_from_cpu(dst, srcf); } } else { if (std::is_same<dst_T, scalar_t>::value) { if (non_blocking) { copy_to_cpu_async_(dst, src); } else { copy_to_cpu(dst, src); } } else { // Copy to CPU as the same dtype, then do a dtype converting copy Tensor srcf = at::empty_like(src, dst.options().dtype(src.dtype())); copy_to_cpu(srcf, src); s_copy_(dst, srcf); } } }); } } // namespace namespace at { namespace native { Tensor& _s_copy__cuda(Tensor& self, const Tensor& src, bool non_blocking) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "_copy__cuda", [&]() { ::_copy__cuda<scalar_t>(self, src, non_blocking); }); return self; } Tensor _s_copy_from_cuda( const Tensor& self, const Tensor& dst, bool non_blocking) { Tensor dst_ = dst; _s_copy__cuda(dst_, self); return dst; } } // namespace native } // namespace at
48d5ecf244b5fab3dd7e8443145e0b6dec10bf89.cu
#include <ATen/ATen.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/Copy.h> namespace { using namespace at; using namespace at::cuda; // Copy operator for the pointwise apply kernel template <typename dst_T, typename src_T> struct CopyOp { static void apply(Tensor& dst, const Tensor& src) { CUDA_tensor_apply2<dst_T, src_T>( dst, src, [] __device__(dst_T & dst_val, const src_T& src_val) { #if __CUDA_ARCH__ >= 350 dst_val = static_cast<dst_T>( static_cast<native::inter_copy_type_t<dst_T>>(__ldg(&src_val))); #else dst_val = static_cast<dst_T>(static_cast<native::inter_copy_type_t<dst_T>>(src_val)); #endif }); } }; // device-to-device copy, does type conversion template <typename dst_T, typename src_T> void copy_device_to_device(Tensor& dst, const Tensor& src) { auto numel = dst.numel(); if (dst.is_same(src) || numel == 0) { return; } // We can memcpy the memory if: // -both tensors are contiguous; or, // -there is only one element to copy; or, // -FIXME: if both tensors have matching size and stride arrays, and no // holes within (in other words, there is some permutation that can be applied // to the size/strides such that the resulting tensor is // contiguous). // -AND: both tensors have the same type. bool same_type = std::is_same<dst_T, src_T>::value; bool memcpy_eligible = ((src.is_contiguous() && dst.is_contiguous()) || (numel == 1)) && same_type; Device src_device = src.device(); Device dst_device = dst.device(); CUDAGuard device_guard(src_device); // Try to enable p2p access. This also handles the case src_device == // dst_device. bool p2pEnabled = THCState_getPeerToPeerAccess( globalContext().getTHCState(), src_device.index(), dst_device.index()); // We always perform the copy on the source device, using the current stream // on the source device, and we fully synchronize on both src and dst's // current streams for completion of the copy. We have to explicitly do this // for non-contig copies. This mimics the behavior of cross-device // cudaMemcpyAsync on the default stream. CUDAStream copy_stream = getCurrentCUDAStream(src_device.index()); if (src_device != dst_device) { // This is a cross-device copy on the src current stream and dst current // stream. We perform a two-way barrier between both devices' streams // before the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are handled, so // that no one is operating on the dst memory when we perform the copy. // src waits on dst barrier (src already waits on src) CUDAEvent dst_ready; device_guard.set_device(dst_device); dst_ready.record(getCurrentCUDAStream(dst_device.index())); device_guard.set_device(src_device); dst_ready.block(copy_stream); } if (memcpy_eligible) { // Perform the copy AT_CUDA_CHECK(cudaMemcpyAsync( dst.data<dst_T>(), src.data<src_T>(), numel * sizeof(dst_T), cudaMemcpyDeviceToDevice, copy_stream)); } else { // Non-contiguous copy or a type-conversion copy // We avoid creating temporary memory copies if possible. // If both src and dst are on the same device, or if they are on // different devices and p2p access is enabled, perform the copy // by a pointwise copy kernel. // Otherwise, we'll have to make contiguous (which will in fact // invoke copy() again), and then perform the copy. 
// FIXME: might want to consider only running the pointwise kernel // if both src and dst innermost dimensions are contiguous. If // they are not, then taking the hit of the memory allocation/free // might be worth it to avoid non-coalesced reads or writes. if (p2pEnabled) { CopyOp<dst_T, src_T>::apply(dst, src); } else { // GPUs can't access each other directly, but the tensors // involved are non-contiguous and/or are different types. // Make sure the src is contiguous and in the same type as dst Tensor src_contig; if (same_type) { src_contig = src.contiguous(); } else { // Types are different // Copy into the new format, contiguous, on the source device src_contig = at::empty_like(dst, src.options().dtype(dst.dtype())); CopyOp<dst_T, src_T>::apply(src_contig, src); } // Make sure the dst is contiguous device_guard.set_device(dst_device); Tensor dst_contig = dst.contiguous(); // Now, we are ready for a cross-device memcpy of contiguous // data, of the same layout and type device_guard.set_device(src_device); AT_CUDA_CHECK(cudaMemcpyAsync( dst_contig.data<dst_T>(), src_contig.data<dst_T>(), numel * sizeof(dst_T), cudaMemcpyDeviceToDevice, copy_stream)); if (!dst.is_contiguous()) { copy_device_to_device<dst_T, dst_T>(dst, dst_contig); } } } if (src_device != dst_device) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on src_device, record stream event CUDAEvent src_ready; src_ready.record(copy_stream); device_guard.set_device(dst_device); src_ready.block(getCurrentCUDAStream(dst_device.index())); } AT_CUDA_CHECK(cudaGetLastError()); } void copy_from_cpu(Tensor& dst, const Tensor& src) { Tensor dst_contig = dst.contiguous(); Tensor src_contig = src.contiguous(); CUDAStream stream = getCurrentCUDAStream(); AT_CUDA_CHECK(cudaMemcpyAsync( dst_contig.data_ptr(), src_contig.data_ptr(), src.numel() * src.dtype().itemsize(), cudaMemcpyHostToDevice, stream)); AT_CUDA_CHECK(cudaStreamSynchronize(stream)); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "copy_from_cpu", [&]() { copy_device_to_device<scalar_t, scalar_t>(dst, dst_contig); }); } void copy_to_cpu(Tensor& dst, const Tensor& src) { Tensor dst_contig = dst.contiguous(); Tensor src_contig = src.contiguous(); CUDAGuard device_guard(src.device()); CUDAStream stream = getCurrentCUDAStream(); AT_CUDA_CHECK(cudaMemcpyAsync( dst_contig.data_ptr(), src_contig.data_ptr(), src.numel() * src.dtype().itemsize(), cudaMemcpyDeviceToHost, stream)); AT_CUDA_CHECK(cudaStreamSynchronize(stream)); _copy_same_type_(dst, dst_contig); } void copy_from_cpu_async_(Tensor& dst, const Tensor& src) { AT_CHECK(dst.is_contiguous(), "Target tensor must be contiguous."); AT_CHECK(src.is_contiguous(), "Source tensor must be contiguous."); if (dst.numel() == 0) { return; } CUDAGuard device_guard(dst.device()); CUDAStream stream = getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "copy_from_cpu_async", [&]() { AT_CUDA_CHECK(cudaMemcpyAsync( dst.data<scalar_t>(), src.data<scalar_t>(), src.numel() * sizeof(scalar_t), cudaMemcpyHostToDevice, stream)); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent( src.storage().data<scalar_t>(), stream)); }); } void copy_to_cpu_async_(Tensor& dst, const Tensor& src) { AT_CHECK(dst.is_contiguous(), "Target tensor must be contiguous."); AT_CHECK(src.is_contiguous(), "Source tensor must be contiguous."); if (dst.numel() == 0) { return; } CUDAGuard device_guard(src.device()); CUDAStream stream = 
getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "copy_to_cpu_async", [&]() { AT_CUDA_CHECK(cudaMemcpyAsync( dst.data<scalar_t>(), src.data<scalar_t>(), src.numel() * sizeof(scalar_t), cudaMemcpyDeviceToHost, stream)); AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent( src.storage().data<scalar_t>(), stream)); }); } template <typename dst_T> void _copy__cuda(Tensor& dst, const Tensor& src, bool non_blocking) { AT_CHECK(dst.numel() == src.numel(), "sizes do not match"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, src.scalar_type(), "_copy__cuda", [&]() { if (dst.is_cuda() && src.is_cuda()) { copy_device_to_device<dst_T, scalar_t>(dst, src); } else if (dst.is_cuda()) { if (std::is_same<dst_T, scalar_t>::value) { if (non_blocking) { copy_from_cpu_async_(dst, src); } else { copy_from_cpu(dst, src); } } else { // Do a dtype converting copy on the CPU, then copy to device Tensor srcf = at::empty_like(src, src.options().dtype(dst.dtype())); s_copy_(srcf, src); copy_from_cpu(dst, srcf); } } else { if (std::is_same<dst_T, scalar_t>::value) { if (non_blocking) { copy_to_cpu_async_(dst, src); } else { copy_to_cpu(dst, src); } } else { // Copy to CPU as the same dtype, then do a dtype converting copy Tensor srcf = at::empty_like(src, dst.options().dtype(src.dtype())); copy_to_cpu(srcf, src); s_copy_(dst, srcf); } } }); } } // namespace namespace at { namespace native { Tensor& _s_copy__cuda(Tensor& self, const Tensor& src, bool non_blocking) { AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "_copy__cuda", [&]() { ::_copy__cuda<scalar_t>(self, src, non_blocking); }); return self; } Tensor _s_copy_from_cuda( const Tensor& self, const Tensor& dst, bool non_blocking) { Tensor dst_ = dst; _s_copy__cuda(dst_, self); return dst; } } // namespace native } // namespace at
660d48598bba84d969e56a7438d9ab54d5cd48b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--blockDim=[16,8] --gridDim=4 /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ // IMPERIAL EDIT //#include <stdio.h> #include "cuenergy.h" //#if UNROLLX != 8 //# error "UNROLLX must be 8" //#endif #if BLOCKSIZEX != 16 # error "BLOCKSIZEX must be 16" #endif // Max constant buffer size is 64KB, minus whatever // the CUDA runtime and compiler are using that we don't know about. // At 16 bytes for atom, for this program 4070 atoms is about the max // we can store in the constant buffer. // IMPERIAL EDIT //__constant__ float4 atominfo[MAXATOMS]; __constant__ float4 atominfo[1]; // This kernel calculates coulombic potential at each grid point and // stores the results in the output array. __global__ void cenergy(int numatoms, float gridspacing, float * energygrid) { unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX + threadIdx.x; unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex + xindex; float coory = gridspacing * yindex; float coorx = gridspacing * xindex; float energyvalx1=0.0f; float energyvalx2=0.0f; float gridspacing_u = gridspacing * BLOCKSIZEX; int atomid; for (atomid=0; atomid<numatoms; atomid++) { float dy = coory - atominfo[atomid].y; float dyz2 = (dy * dy) + atominfo[atomid].z; float dx1 = coorx - atominfo[atomid].x; float dx2 = dx1 + gridspacing_u; energyvalx1 += atominfo[atomid].w * (1.0f / sqrtf(dx1*dx1 + dyz2)); energyvalx2 += atominfo[atomid].w * (1.0f / sqrtf(dx2*dx2 + dyz2)); } energygrid[outaddr] += energyvalx1; energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2; }
660d48598bba84d969e56a7438d9ab54d5cd48b0.cu
//pass //--blockDim=[16,8] --gridDim=4 /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ // IMPERIAL EDIT //#include <stdio.h> #include "cuenergy.h" //#if UNROLLX != 8 //# error "UNROLLX must be 8" //#endif #if BLOCKSIZEX != 16 # error "BLOCKSIZEX must be 16" #endif // Max constant buffer size is 64KB, minus whatever // the CUDA runtime and compiler are using that we don't know about. // At 16 bytes for atom, for this program 4070 atoms is about the max // we can store in the constant buffer. // IMPERIAL EDIT //__constant__ float4 atominfo[MAXATOMS]; __constant__ float4 atominfo[1]; // This kernel calculates coulombic potential at each grid point and // stores the results in the output array. __global__ void cenergy(int numatoms, float gridspacing, float * energygrid) { unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX + threadIdx.x; unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex + xindex; float coory = gridspacing * yindex; float coorx = gridspacing * xindex; float energyvalx1=0.0f; float energyvalx2=0.0f; float gridspacing_u = gridspacing * BLOCKSIZEX; int atomid; for (atomid=0; atomid<numatoms; atomid++) { float dy = coory - atominfo[atomid].y; float dyz2 = (dy * dy) + atominfo[atomid].z; float dx1 = coorx - atominfo[atomid].x; float dx2 = dx1 + gridspacing_u; energyvalx1 += atominfo[atomid].w * (1.0f / sqrtf(dx1*dx1 + dyz2)); energyvalx2 += atominfo[atomid].w * (1.0f / sqrtf(dx2*dx2 + dyz2)); } energygrid[outaddr] += energyvalx1; energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2; }
a25e79d664fbc3e045ed2b04e0a4e2cc4f4388af.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include "b2b_gemm_f16t_f16n_f16t_tensor_op_f16_sm75.h" #include "b2b_gemm_s8n_s8t_s8n_tensor_op_s32_sm75.h" int run() { hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) run_nonfused_gemm_f16(); run_fused_gemm_f16(); run_nonfused_gemm_s8(); run_fused_gemm_s8(); #endif return 0; } int main() { // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } else { return run(); } }
a25e79d664fbc3e045ed2b04e0a4e2cc4f4388af.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include "b2b_gemm_f16t_f16n_f16t_tensor_op_f16_sm75.h" #include "b2b_gemm_s8n_s8t_s8n_tensor_op_s32_sm75.h" int run() { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) run_nonfused_gemm_f16(); run_fused_gemm_f16(); run_nonfused_gemm_s8(); run_fused_gemm_s8(); #endif return 0; } int main() { // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } else { return run(); } }
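The pair above changes only host-side runtime API names: the CUDA types and calls map one-to-one onto HIP equivalents while the control flow stays identical. A hedged sketch of just the device-query and error-handling mapping exercised by this row (not the full hipify rename table; the function names report_cuda/report_hip are illustrative):

// --- CUDA host code ---
#include <cuda_runtime.h>
#include <iostream>
void report_cuda() {
  cudaDeviceProp props;
  cudaError_t err = cudaGetDeviceProperties(&props, 0);
  if (err != cudaSuccess) std::cerr << cudaGetErrorString(err) << std::endl;
}

// --- HIP form produced by hipify: same argument order and semantics ---
#include <hip/hip_runtime.h>
#include <iostream>
void report_hip() {
  hipDeviceProp_t props;
  hipError_t err = hipGetDeviceProperties(&props, 0);
  if (err != hipSuccess) std::cerr << hipGetErrorString(err) << std::endl;
}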
ae79ba22ce6b120a9efd43fa96bde7b1708500dd.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** MIT License Copyright (c) 2016 Antti-Pekka Hynninen Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *******************************************************************************/ #include <hip/hip_runtime.h> #include "CudaUtils.h" #include "cuttGpuModelKernel.h" #define RESTRICT //__restrict__ // // Global memory access statistics // struct MemStat { int gld_tran; int gst_tran; int gld_req; int gst_req; int cl_full_l2; int cl_part_l2; int cl_full_l1; int cl_part_l1; // int l1_tran; __device__ __forceinline__ void clear() { gld_tran = 0; gst_tran = 0; gld_req = 0; gst_req = 0; cl_full_l2 = 0; cl_part_l2 = 0; cl_full_l1 = 0; cl_part_l1 = 0; // l1_tran = 0; } }; // // Returns scalar tensor position. Each lane has the same p // NOTE: c and d on inactive warps must be 1 !! // __device__ __forceinline__ int tensorPos( const int p, const int rank, const int c, const int d, const int ct, const int numLane=warpSize ) { int r = ((p/c) % d)*ct; #pragma unroll for (int i=numLane/2;i >= 1;i/=2) { r += __shfl_xor_sync(0xffffffff,r,i); } return r; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int pos, const int n, const int accWidth, const int warpLane) { int seg0 = pos/accWidth; int srcLane = (warpLane == 0 || warpLane >= n) ? (warpLane) : (warpLane - 1); int seg1 = __shfl_sync(0xffffffff,seg0,srcLane); int count = __popc(__ballot_sync(0xffffffff,seg0 != seg1)) + 1; count = (n == 0) ? 0 : count; return count; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int* segbuf, const int n) { int count = 0; for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int seg_prev = (i - 1 >= 0) ? 
segbuf[i - 1] : -1; count += (seg != seg_prev); } return count; } // // Counts number of full and partial cache lines for a warp that accesses per warp // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ void countCacheLines(const int pos, const int n, const int cacheWidth, const int warpLane, int& cl_full, int& cl_part) { int seg = pos/cacheWidth; // Lane is at the beginning of a full cache line, if seg0 matches seg0 cacheWidth - 1 away int readLane = warpLane + (cacheWidth - 1); int val = (seg == __shfl_sync(0xffffffff,seg,readLane)); val = (readLane < n) ? val : 0; cl_full += val; unsigned int valbit = (((val << cacheWidth) - 1)*val) << warpLane; // Perform warpSize-way bitwise or #pragma unroll for (int i=warpSize/2;i >= 1;i/=2) { valbit |= __shfl_xor_sync(0xffffffff,valbit,i); } // Now: lanes with valbit set are part of a full cache line, // lanes with valbit unset are part of a partial cache line int full = (valbit >> warpLane) & 1; seg = (warpLane < n) ? seg : -1; int segP1 = __shfl_down_sync(0xffffffff,seg,1); segP1 = (warpLane + 1 < warpSize) ? segP1 : -1; int val2 = ((!full) && seg != segP1); cl_part += val2; } // // Counts number of full and partial cache lines for a warp that accesses // memory at cachelines segbuf[0] ... segbuf[n - 1] // __device__ __forceinline__ void countCacheLines(int* segbuf, const int n, const int cacheWidth, int& cl_full, int& cl_part) { const int topbit = (1 << 31); const int lowbits = ~(1 << 31); for (int i = threadIdx.x;i < n;i += blockDim.x) { // seg[i] is at the beginning of a full cache line, if seg[i] matches seg[i + cacheWidth - 1] int i1 = i + (cacheWidth - 1); int val = 0; if (i1 < n) val = ((segbuf[i] & lowbits) == (segbuf[i1] & lowbits)); cl_full += val; // Mark full cache lines with top bit set to 1 if (val) { for (int j=0;j < cacheWidth;j++) { if (i + j < n) segbuf[i + j] |= topbit; } } } __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int segP1 = (i + 1 < n) ? segbuf[i + 1] : -1; int part = ((seg & topbit) == 0); int val2 = (part && seg != segP1); cl_part += val2; } // Clear top bits __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { segbuf[i] &= lowbits; } } // // Runs countGlTransactions and countCacheLines counters for testing // Unused values in posData[] are marked with "-1" // __global__ void runCountersKernel(const int* posData, const int numPosData, const int accWidth, const int cacheWidth, int* tranData, int* cl_fullData, int* cl_partData) { const int warpLane = threadIdx.x & (warpSize - 1); for (int i=threadIdx.x + blockIdx.x*blockDim.x;i < numPosData;i+=blockDim.x*gridDim.x) { int pos = posData[i]; int flag = (pos == -1); int ffsval = __ffs(__ballot_sync(0xffffffff,flag)) - 1; int n = (__any_sync(0xffffffff,flag)) ? ffsval : warpSize; int tran = countGlTransactions(pos, n, accWidth, warpLane); int cl_full = 0; int cl_part = 0; countCacheLines(pos, n, cacheWidth, warpLane, cl_full, cl_part); #pragma unroll for (int k=warpSize/2;k >= 1;k/=2) { cl_full += __shfl_xor_sync(0xffffffff,cl_full,k); cl_part += __shfl_xor_sync(0xffffffff,cl_part,k); } int j = i / warpSize; tranData[j] = tran; cl_fullData[j] = cl_full; cl_partData[j] = cl_part; } } // // Reduce memStat within warp and write result to global memory // NOTE: Not super-efficient since every warp does atomicAdd(). 
// __device__ __forceinline__ void writeMemStat(const int warpLane, MemStat memStat, MemStat* RESTRICT glMemStat) { for (int i=16;i >= 1;i/=2) { // memStat.gld_tran += __shfl_xor_sync(0xffffffff,memStat.gld_tran,i); // memStat.gst_tran += __shfl_xor_sync(0xffffffff,memStat.gst_tran,i); // memStat.gld_req += __shfl_xor_sync(0xffffffff,memStat.gld_req,i); // memStat.gst_req += __shfl_xor_sync(0xffffffff,memStat.gst_req,i); memStat.cl_full_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l2,i); memStat.cl_part_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l2,i); memStat.cl_full_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l1,i); memStat.cl_part_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l1,i); // memStat.l1_tran += __shfl_xor_sync(0xffffffff,memStat.l1_tran,i); } if (warpLane == 0) { atomicAdd(&(glMemStat->gld_tran), memStat.gld_tran); atomicAdd(&(glMemStat->gst_tran), memStat.gst_tran); atomicAdd(&(glMemStat->gld_req), memStat.gld_req); atomicAdd(&(glMemStat->gst_req), memStat.gst_req); atomicAdd(&(glMemStat->cl_full_l2), memStat.cl_full_l2); atomicAdd(&(glMemStat->cl_part_l2), memStat.cl_part_l2); atomicAdd(&(glMemStat->cl_full_l1), memStat.cl_full_l1); atomicAdd(&(glMemStat->cl_part_l1), memStat.cl_part_l1); // atomicAdd(&(glMemStat->l1_tran), memStat.l1_tran); } } // // Transpose when Mm and Mk don't overlap and contain only single rank // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiled( const int numMm, const int volMbar, const int sizeMbar, const int2 tiledVol, const int cuDimMk, const int cuDimMm, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int xin = bx + threadIdx.x; const int yin = by + threadIdx.y; const int xout = bx + threadIdx.y; const int yout = by + threadIdx.x; const unsigned int maskIny = __ballot_sync(0xffffffff,(yin + warpLane < tiledVol.y))*(xin < tiledVol.x); const unsigned int maskOutx = __ballot_sync(0xffffffff,(xout + warpLane < tiledVol.x))*(yout < tiledVol.y); const int posMinorIn = xin + yin*cuDimMk; const int posMinorOut = yout + xout*cuDimMm; const int posInAdd = TILEROWS*cuDimMk; const int posOutAdd = TILEROWS*cuDimMm; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Compute global memory positions int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i); posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i); } int posIn = posMajorIn + posMinorIn; int posOut = posMajorOut + posMinorOut; // Read data into shared memory tile #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskIny & (1 << j))); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); posIn += posInAdd; } #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskOutx & (1 << j))); memStat.gst_tran += 
countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); posOut += posOutAdd; } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed transpose. Thread block loads plan.volMmk number of elements // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPacked( const int volMmk, const int volMbar, const int sizeMmk, const int sizeMbar, const TensorConvInOut* RESTRICT gl_Mmk, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = gl_Mmk[warpLane]; } // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = 0; posMmkOut[j] = 0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } // 6 registers TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmk) shSegOut[posMmk] = posOut/cacheWidth; } __syncthreads(); countCacheLines(shSegOut, volMmk, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmk;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmk, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmk); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed method with a split rank // // dim nthread(((volMmkWithSplit - 
1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1) // dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1) // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPackedSplit( const int splitDim, const int volMmkUnsplit, const int volMbar, const int sizeMmk, const int sizeMbar, const int cMmSplit, const int cMkSplit, const TensorConvInOut* RESTRICT glMmk, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); // const int plusone = (blockIdx.x < (splitDim % gridDim.x)); const int p0 = blockIdx.x*splitDim/gridDim.x; const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0; const int plusone = volSplit - splitDim/gridDim.x; TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = glMmk[warpLane + plusone*sizeMmk]; } // gridDim.x = number of splits // blockIdx.x = {0 ... gridDim.x - 1} is the split-index // Volume of this split // const int volSplit = (splitDim/gridDim.x) + plusone; // Start position in this split // const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x)); const int posMmkIn0 = p0*cMmSplit; const int posMmkOut0 = p0*cMkSplit; // Volume of split Mmk const int volMmkSplit = volSplit*volMmkUnsplit; // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = posMmkIn0; posMmkOut[j] = posMmkOut0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int t = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmkSplit) shSegOut[posMmk] = posOut / cacheWidth; // countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full, memStat.cl_part); } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go 
from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmkSplit;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmkSplit); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2) // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiledCopy( const int numMm, const int volMbar, const int sizeMbar, const int cuDimMk, const int cuDimMm, const int2 tiledVol, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int x = bx + threadIdx.x; const int y = by + threadIdx.y; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Read global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_in, Mbar.d_in, Mbar.ct_in); pos0 += x + y*cuDimMk; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMk; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gld_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } } // Write global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_out, Mbar.d_out, Mbar.ct_out); pos0 += x + y*cuDimMm; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMm; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gst_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(pos, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); } } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } //###################################################################################### //###################################################################################### //###################################################################################### void runCounters(const int warpSize, const int* hostPosData, const int numPosData, const int accWidth, const int cacheWidth, int* host_tran, int* host_cl_full, int* host_cl_part) { const int numWarp = numPosData/warpSize; int* devPosData; allocate_device<int>(&devPosData, numPosData); copy_HtoD<int>(hostPosData, devPosData, numPosData); int* dev_tran; int* dev_cl_full; int* dev_cl_part; allocate_device<int>(&dev_tran, numWarp); allocate_device<int>(&dev_cl_full, numWarp); allocate_device<int>(&dev_cl_part, numWarp); int nthread = 512; int nblock = (numPosData - 1)/nthread + 1; hipLaunchKernelGGL(( runCountersKernel), dim3(nblock), dim3(nthread) , 0, 0, devPosData, numPosData, accWidth, cacheWidth, dev_tran, dev_cl_full, dev_cl_part); cudaCheck(hipGetLastError()); 
copy_DtoH<int>(dev_tran, host_tran, numWarp); copy_DtoH<int>(dev_cl_full, host_cl_full, numWarp); copy_DtoH<int>(dev_cl_part, host_cl_part, numWarp); cudaCheck(hipDeviceSynchronize()); deallocate_device<int>(&dev_tran); deallocate_device<int>(&dev_cl_full); deallocate_device<int>(&dev_cl_part); deallocate_device<int>(&devPosData); } bool cuttGpuModelKernel(cuttPlan_t& plan, const int accWidth, const int cacheWidth, int& gld_tran, int& gst_tran, int& gld_req, int& gst_req, int& cl_full_l2, int& cl_part_l2, int& cl_full_l1, int& cl_part_l1) { LaunchConfig& lc = plan.launchConfig; TensorSplit& ts = plan.tensorSplit; MemStat* devMemStat; allocate_device<MemStat>(&devMemStat, 1); set_device_array<MemStat>(devMemStat, 0, 1, plan.stream); switch(ts.method) { case Trivial: { return false; } case Packed: { switch(lc.numRegStorage) { #define CALL0(NREG) \ hipLaunchKernelGGL(( countPacked<NREG>) , dim3(lc.numblock), dim3(lc.numthread), ts.volMmk*sizeof(int), plan.stream , \ ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case PackedSplit: { // Calculate max. volume of split Mmk const int volSplit = (ts.splitDim/ts.numSplit) + ((ts.splitDim % ts.numSplit) != 0); const int volMmkSplit = volSplit*ts.volMmkUnsplit; switch(lc.numRegStorage) { #define CALL0(NREG) \ hipLaunchKernelGGL(( countPackedSplit<NREG>) , dim3(lc.numblock), dim3(lc.numthread), volMmkSplit*sizeof(int), plan.stream , \ ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case Tiled: { hipLaunchKernelGGL(( countTiled) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , ((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; case TiledCopy: { hipLaunchKernelGGL(( countTiledCopy) , dim3(lc.numblock), dim3(lc.numthread), 0, plan.stream , ((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; } cudaCheck(hipGetLastError()); MemStat hostMemStat; copy_DtoH<MemStat>(devMemStat, &hostMemStat, 1, plan.stream); cudaCheck(hipDeviceSynchronize()); deallocate_device<MemStat>(&devMemStat); gld_tran = hostMemStat.gld_tran; gst_tran = hostMemStat.gst_tran; gld_req = hostMemStat.gld_req; gst_req = hostMemStat.gst_req; cl_full_l2 = hostMemStat.cl_full_l2; cl_part_l2 = hostMemStat.cl_part_l2; cl_full_l1 = hostMemStat.cl_full_l1; cl_part_l1 = hostMemStat.cl_part_l1; // l1_tran = hostMemStat.l1_tran; return true; }
ae79ba22ce6b120a9efd43fa96bde7b1708500dd.cu
/****************************************************************************** MIT License Copyright (c) 2016 Antti-Pekka Hynninen Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *******************************************************************************/ #include <cuda.h> #include "CudaUtils.h" #include "cuttGpuModelKernel.h" #define RESTRICT //__restrict__ // // Global memory access statistics // struct MemStat { int gld_tran; int gst_tran; int gld_req; int gst_req; int cl_full_l2; int cl_part_l2; int cl_full_l1; int cl_part_l1; // int l1_tran; __device__ __forceinline__ void clear() { gld_tran = 0; gst_tran = 0; gld_req = 0; gst_req = 0; cl_full_l2 = 0; cl_part_l2 = 0; cl_full_l1 = 0; cl_part_l1 = 0; // l1_tran = 0; } }; // // Returns scalar tensor position. Each lane has the same p // NOTE: c and d on inactive warps must be 1 !! // __device__ __forceinline__ int tensorPos( const int p, const int rank, const int c, const int d, const int ct, const int numLane=warpSize ) { int r = ((p/c) % d)*ct; #pragma unroll for (int i=numLane/2;i >= 1;i/=2) { r += __shfl_xor_sync(0xffffffff,r,i); } return r; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int pos, const int n, const int accWidth, const int warpLane) { int seg0 = pos/accWidth; int srcLane = (warpLane == 0 || warpLane >= n) ? (warpLane) : (warpLane - 1); int seg1 = __shfl_sync(0xffffffff,seg0,srcLane); int count = __popc(__ballot_sync(0xffffffff,seg0 != seg1)) + 1; count = (n == 0) ? 0 : count; return count; } // // Counts number of global memory transactions for a warp that accesses // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ int countGlTransactions(const int* segbuf, const int n) { int count = 0; for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int seg_prev = (i - 1 >= 0) ? 
segbuf[i - 1] : -1; count += (seg != seg_prev); } return count; } // // Counts number of full and partial cache lines for a warp that accesses per warp // memory at pos using warp lanes 0, ..., n - 1 // __device__ __forceinline__ void countCacheLines(const int pos, const int n, const int cacheWidth, const int warpLane, int& cl_full, int& cl_part) { int seg = pos/cacheWidth; // Lane is at the beginning of a full cache line, if seg0 matches seg0 cacheWidth - 1 away int readLane = warpLane + (cacheWidth - 1); int val = (seg == __shfl_sync(0xffffffff,seg,readLane)); val = (readLane < n) ? val : 0; cl_full += val; unsigned int valbit = (((val << cacheWidth) - 1)*val) << warpLane; // Perform warpSize-way bitwise or #pragma unroll for (int i=warpSize/2;i >= 1;i/=2) { valbit |= __shfl_xor_sync(0xffffffff,valbit,i); } // Now: lanes with valbit set are part of a full cache line, // lanes with valbit unset are part of a partial cache line int full = (valbit >> warpLane) & 1; seg = (warpLane < n) ? seg : -1; int segP1 = __shfl_down_sync(0xffffffff,seg,1); segP1 = (warpLane + 1 < warpSize) ? segP1 : -1; int val2 = ((!full) && seg != segP1); cl_part += val2; } // // Counts number of full and partial cache lines for a warp that accesses // memory at cachelines segbuf[0] ... segbuf[n - 1] // __device__ __forceinline__ void countCacheLines(int* segbuf, const int n, const int cacheWidth, int& cl_full, int& cl_part) { const int topbit = (1 << 31); const int lowbits = ~(1 << 31); for (int i = threadIdx.x;i < n;i += blockDim.x) { // seg[i] is at the beginning of a full cache line, if seg[i] matches seg[i + cacheWidth - 1] int i1 = i + (cacheWidth - 1); int val = 0; if (i1 < n) val = ((segbuf[i] & lowbits) == (segbuf[i1] & lowbits)); cl_full += val; // Mark full cache lines with top bit set to 1 if (val) { for (int j=0;j < cacheWidth;j++) { if (i + j < n) segbuf[i + j] |= topbit; } } } __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { int seg = segbuf[i]; int segP1 = (i + 1 < n) ? segbuf[i + 1] : -1; int part = ((seg & topbit) == 0); int val2 = (part && seg != segP1); cl_part += val2; } // Clear top bits __syncthreads(); for (int i = threadIdx.x;i < n;i += blockDim.x) { segbuf[i] &= lowbits; } } // // Runs countGlTransactions and countCacheLines counters for testing // Unused values in posData[] are marked with "-1" // __global__ void runCountersKernel(const int* posData, const int numPosData, const int accWidth, const int cacheWidth, int* tranData, int* cl_fullData, int* cl_partData) { const int warpLane = threadIdx.x & (warpSize - 1); for (int i=threadIdx.x + blockIdx.x*blockDim.x;i < numPosData;i+=blockDim.x*gridDim.x) { int pos = posData[i]; int flag = (pos == -1); int ffsval = __ffs(__ballot_sync(0xffffffff,flag)) - 1; int n = (__any_sync(0xffffffff,flag)) ? ffsval : warpSize; int tran = countGlTransactions(pos, n, accWidth, warpLane); int cl_full = 0; int cl_part = 0; countCacheLines(pos, n, cacheWidth, warpLane, cl_full, cl_part); #pragma unroll for (int k=warpSize/2;k >= 1;k/=2) { cl_full += __shfl_xor_sync(0xffffffff,cl_full,k); cl_part += __shfl_xor_sync(0xffffffff,cl_part,k); } int j = i / warpSize; tranData[j] = tran; cl_fullData[j] = cl_full; cl_partData[j] = cl_part; } } // // Reduce memStat within warp and write result to global memory // NOTE: Not super-efficient since every warp does atomicAdd(). 
// __device__ __forceinline__ void writeMemStat(const int warpLane, MemStat memStat, MemStat* RESTRICT glMemStat) { for (int i=16;i >= 1;i/=2) { // memStat.gld_tran += __shfl_xor_sync(0xffffffff,memStat.gld_tran,i); // memStat.gst_tran += __shfl_xor_sync(0xffffffff,memStat.gst_tran,i); // memStat.gld_req += __shfl_xor_sync(0xffffffff,memStat.gld_req,i); // memStat.gst_req += __shfl_xor_sync(0xffffffff,memStat.gst_req,i); memStat.cl_full_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l2,i); memStat.cl_part_l2 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l2,i); memStat.cl_full_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_full_l1,i); memStat.cl_part_l1 += __shfl_xor_sync(0xffffffff,memStat.cl_part_l1,i); // memStat.l1_tran += __shfl_xor_sync(0xffffffff,memStat.l1_tran,i); } if (warpLane == 0) { atomicAdd(&(glMemStat->gld_tran), memStat.gld_tran); atomicAdd(&(glMemStat->gst_tran), memStat.gst_tran); atomicAdd(&(glMemStat->gld_req), memStat.gld_req); atomicAdd(&(glMemStat->gst_req), memStat.gst_req); atomicAdd(&(glMemStat->cl_full_l2), memStat.cl_full_l2); atomicAdd(&(glMemStat->cl_part_l2), memStat.cl_part_l2); atomicAdd(&(glMemStat->cl_full_l1), memStat.cl_full_l1); atomicAdd(&(glMemStat->cl_part_l1), memStat.cl_part_l1); // atomicAdd(&(glMemStat->l1_tran), memStat.l1_tran); } } // // Transpose when Mm and Mk don't overlap and contain only single rank // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMk-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiled( const int numMm, const int volMbar, const int sizeMbar, const int2 tiledVol, const int cuDimMk, const int cuDimMm, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int xin = bx + threadIdx.x; const int yin = by + threadIdx.y; const int xout = bx + threadIdx.y; const int yout = by + threadIdx.x; const unsigned int maskIny = __ballot_sync(0xffffffff,(yin + warpLane < tiledVol.y))*(xin < tiledVol.x); const unsigned int maskOutx = __ballot_sync(0xffffffff,(xout + warpLane < tiledVol.x))*(yout < tiledVol.y); const int posMinorIn = xin + yin*cuDimMk; const int posMinorOut = yout + xout*cuDimMm; const int posInAdd = TILEROWS*cuDimMk; const int posOutAdd = TILEROWS*cuDimMm; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Compute global memory positions int posMajorIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; int posMajorOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMajorIn += __shfl_xor_sync(0xffffffff,posMajorIn,i); posMajorOut += __shfl_xor_sync(0xffffffff,posMajorOut,i); } int posIn = posMajorIn + posMinorIn; int posOut = posMajorOut + posMinorOut; // Read data into shared memory tile #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskIny & (1 << j))); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); posIn += posInAdd; } #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int n = __popc(__ballot_sync(0xffffffff,maskOutx & (1 << j))); memStat.gst_tran += 
countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); posOut += posOutAdd; } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed transpose. Thread block loads plan.volMmk number of elements // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPacked( const int volMmk, const int volMbar, const int sizeMmk, const int sizeMbar, const TensorConvInOut* RESTRICT gl_Mmk, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = gl_Mmk[warpLane]; } // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = 0; posMmkOut[j] = 0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((posMmk / __shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } // 6 registers TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.x;posMbar < volMbar;posMbar += gridDim.x) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmk)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmk) shSegOut[posMmk] = posOut/cacheWidth; } __syncthreads(); countCacheLines(shSegOut, volMmk, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmk;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmk, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmk); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Packed method with a split rank // // dim nthread(((volMmkWithSplit - 
1)/(prop.warpSize*lc.numRegStorage) + 1)*prop.warpSize, 1, 1) // dim nblock(ts.numSplit, min(256, max(1, ts.volMbar)), 1) // template <int numRegStorage> __global__ void __launch_bounds__(1024, 1) countPackedSplit( const int splitDim, const int volMmkUnsplit, const int volMbar, const int sizeMmk, const int sizeMbar, const int cMmSplit, const int cMkSplit, const TensorConvInOut* RESTRICT glMmk, const TensorConvInOut* RESTRICT glMbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { extern __shared__ int shSegOut[]; const int warpLane = threadIdx.x & (warpSize - 1); // const int plusone = (blockIdx.x < (splitDim % gridDim.x)); const int p0 = blockIdx.x*splitDim/gridDim.x; const int volSplit = (blockIdx.x + 1)*splitDim/gridDim.x - p0; const int plusone = volSplit - splitDim/gridDim.x; TensorConvInOut Mmk; Mmk.c_in = 1; Mmk.d_in = 1; Mmk.c_out = 1; Mmk.d_out = 1; if (warpLane < sizeMmk) { Mmk = glMmk[warpLane + plusone*sizeMmk]; } // gridDim.x = number of splits // blockIdx.x = {0 ... gridDim.x - 1} is the split-index // Volume of this split // const int volSplit = (splitDim/gridDim.x) + plusone; // Start position in this split // const int p0 = (splitDim/gridDim.x)*blockIdx.x + min(blockIdx.x, (splitDim % gridDim.x)); const int posMmkIn0 = p0*cMmSplit; const int posMmkOut0 = p0*cMkSplit; // Volume of split Mmk const int volMmkSplit = volSplit*volMmkUnsplit; // Pre-compute tensor positions in Mmk // 3*numRegStorage registers int posMmkIn[numRegStorage]; int posMmkOut[numRegStorage]; #pragma unroll for (int j=0;j < numRegStorage;j++) { posMmkIn[j] = posMmkIn0; posMmkOut[j] = posMmkOut0; } for (int i=0;i < sizeMmk;i++) { #pragma unroll for (int j=0;j < numRegStorage;j++) { int t = threadIdx.x + j*blockDim.x; posMmkIn[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_in,i)) % __shfl_sync(0xffffffff,Mmk.d_in,i))*__shfl_sync(0xffffffff,Mmk.ct_in,i); posMmkOut[j] += ((t/__shfl_sync(0xffffffff,Mmk.c_out,i)) % __shfl_sync(0xffffffff,Mmk.d_out,i))*__shfl_sync(0xffffffff,Mmk.ct_out,i); } } TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = glMbar[warpLane]; } MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.y;posMbar < volMbar;posMbar+=gridDim.y) { int posMbarOut = ((posMbar/Mbar.c_out) % Mbar.d_out)*Mbar.ct_out; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarOut += __shfl_xor_sync(0xffffffff,posMbarOut,i); } int posMbarIn = ((posMbar/Mbar.c_in) % Mbar.d_in)*Mbar.ct_in; #pragma unroll for (int i=16;i >= 1;i/=2) { posMbarIn += __shfl_xor_sync(0xffffffff,posMbarIn,i); } // Read from global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posIn = posMbarIn + posMmkIn[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gld_tran += countGlTransactions(posIn, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } // Write to global memory #pragma unroll for (int j=0;j < numRegStorage;j++) { int posMmk = threadIdx.x + j*blockDim.x; int posOut = posMbarOut + posMmkOut[j]; int n = __popc(__ballot_sync(0xffffffff,posMmk < volMmkSplit)); memStat.gst_tran += countGlTransactions(posOut, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); if (posMmk < volMmkSplit) shSegOut[posMmk] = posOut / cacheWidth; // countCacheLines(posOut, n, cacheWidth, warpLane, memStat.cl_full, memStat.cl_part); } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, cacheWidth, memStat.cl_full_l2, memStat.cl_part_l2); // Go 
from L2 segments to L1 segments __syncthreads(); const int L2toL1 = accWidth/cacheWidth; for (int i=threadIdx.x;i < volMmkSplit;i+=blockDim.x) { shSegOut[i] /= L2toL1; } __syncthreads(); countCacheLines(shSegOut, volMmkSplit, accWidth, memStat.cl_full_l1, memStat.cl_part_l1); // __syncthreads(); // memStat.l1_tran += countGlTransactions(shSegOut, volMmkSplit); } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } // // Transpose when the lead dimension is the same, e.g. (1, 2, 3) -> (1, 3, 2) // // dim3 numthread(TILEDIM, TILEROWS, 1); // dim3 numblock( ((plan.volMm-1)/TILEDIM+1)*((plan.volMkBar-1)/TILEDIM+1), 1, plan.volMbar); // __global__ void __launch_bounds__(TILEDIM*TILEROWS, 1) countTiledCopy( const int numMm, const int volMbar, const int sizeMbar, const int cuDimMk, const int cuDimMm, const int2 tiledVol, const TensorConvInOut* RESTRICT gl_Mbar, const int accWidth, const int cacheWidth, MemStat* RESTRICT glMemStat) { const int warpLane = threadIdx.x & (warpSize - 1); TensorConvInOut Mbar; Mbar.c_in = 1; Mbar.d_in = 1; Mbar.c_out = 1; Mbar.d_out = 1; if (warpLane < sizeMbar) { Mbar = gl_Mbar[warpLane]; } const int bx = (blockIdx.x % numMm)*TILEDIM; const int by = (blockIdx.x / numMm)*TILEDIM; const int x = bx + threadIdx.x; const int y = by + threadIdx.y; MemStat memStat; memStat.clear(); for (int posMbar=blockIdx.z;posMbar < volMbar;posMbar += gridDim.z) { // Read global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_in, Mbar.d_in, Mbar.ct_in); pos0 += x + y*cuDimMk; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMk; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gld_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gld_req += __any_sync(0xffffffff,n > 0); } } // Write global memory { int pos0 = tensorPos(posMbar, sizeMbar, Mbar.c_out, Mbar.d_out, Mbar.ct_out); pos0 += x + y*cuDimMm; #pragma unroll for (int j=0;j < TILEDIM;j += TILEROWS) { int pos = pos0 + j*cuDimMm; int n = __popc(__ballot_sync(0xffffffff,(x < tiledVol.x) && (y + j < tiledVol.y))); memStat.gst_tran += countGlTransactions(pos, n, accWidth, warpLane); memStat.gst_req += __any_sync(0xffffffff,n > 0); countCacheLines(pos, n, cacheWidth, warpLane, memStat.cl_full_l2, memStat.cl_part_l2); } } } // Reduce memStat within thread block and write result to global memory writeMemStat(warpLane, memStat, glMemStat); } //###################################################################################### //###################################################################################### //###################################################################################### void runCounters(const int warpSize, const int* hostPosData, const int numPosData, const int accWidth, const int cacheWidth, int* host_tran, int* host_cl_full, int* host_cl_part) { const int numWarp = numPosData/warpSize; int* devPosData; allocate_device<int>(&devPosData, numPosData); copy_HtoD<int>(hostPosData, devPosData, numPosData); int* dev_tran; int* dev_cl_full; int* dev_cl_part; allocate_device<int>(&dev_tran, numWarp); allocate_device<int>(&dev_cl_full, numWarp); allocate_device<int>(&dev_cl_part, numWarp); int nthread = 512; int nblock = (numPosData - 1)/nthread + 1; runCountersKernel<<< nblock, nthread >>>(devPosData, numPosData, accWidth, cacheWidth, dev_tran, dev_cl_full, dev_cl_part); cudaCheck(cudaGetLastError()); copy_DtoH<int>(dev_tran, host_tran, numWarp); 
copy_DtoH<int>(dev_cl_full, host_cl_full, numWarp); copy_DtoH<int>(dev_cl_part, host_cl_part, numWarp); cudaCheck(cudaDeviceSynchronize()); deallocate_device<int>(&dev_tran); deallocate_device<int>(&dev_cl_full); deallocate_device<int>(&dev_cl_part); deallocate_device<int>(&devPosData); } bool cuttGpuModelKernel(cuttPlan_t& plan, const int accWidth, const int cacheWidth, int& gld_tran, int& gst_tran, int& gld_req, int& gst_req, int& cl_full_l2, int& cl_part_l2, int& cl_full_l1, int& cl_part_l1) { LaunchConfig& lc = plan.launchConfig; TensorSplit& ts = plan.tensorSplit; MemStat* devMemStat; allocate_device<MemStat>(&devMemStat, 1); set_device_array<MemStat>(devMemStat, 0, 1, plan.stream); switch(ts.method) { case Trivial: { return false; } case Packed: { switch(lc.numRegStorage) { #define CALL0(NREG) \ countPacked<NREG> <<< lc.numblock, lc.numthread, ts.volMmk*sizeof(int), plan.stream >>> \ (ts.volMmk, ts.volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case PackedSplit: { // Calculate max. volume of split Mmk const int volSplit = (ts.splitDim/ts.numSplit) + ((ts.splitDim % ts.numSplit) != 0); const int volMmkSplit = volSplit*ts.volMmkUnsplit; switch(lc.numRegStorage) { #define CALL0(NREG) \ countPackedSplit<NREG> <<< lc.numblock, lc.numthread, volMmkSplit*sizeof(int), plan.stream >>> \ (ts.splitDim, ts.volMmkUnsplit, ts. volMbar, ts.sizeMmk, ts.sizeMbar, \ plan.cuDimMm, plan.cuDimMk, plan.Mmk, plan.Mbar, accWidth, cacheWidth, devMemStat) #define CALL(ICASE) case ICASE: CALL0(ICASE); break #include "calls.h" default: printf("cuttGpuModelKernel no template implemented for numRegStorage %d\n", lc.numRegStorage); return false; #undef CALL #undef CALL0 } } break; case Tiled: { countTiled <<< lc.numblock, lc.numthread, 0, plan.stream >>> (((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.tiledVol, plan.cuDimMk, plan.cuDimMm, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; case TiledCopy: { countTiledCopy <<< lc.numblock, lc.numthread, 0, plan.stream >>> (((ts.volMm - 1)/TILEDIM + 1), ts.volMbar, ts.sizeMbar, plan.cuDimMk, plan.cuDimMm, plan.tiledVol, plan.Mbar, accWidth, cacheWidth, devMemStat); } break; } cudaCheck(cudaGetLastError()); MemStat hostMemStat; copy_DtoH<MemStat>(devMemStat, &hostMemStat, 1, plan.stream); cudaCheck(cudaDeviceSynchronize()); deallocate_device<MemStat>(&devMemStat); gld_tran = hostMemStat.gld_tran; gst_tran = hostMemStat.gst_tran; gld_req = hostMemStat.gld_req; gst_req = hostMemStat.gst_req; cl_full_l2 = hostMemStat.cl_full_l2; cl_part_l2 = hostMemStat.cl_part_l2; cl_full_l1 = hostMemStat.cl_full_l1; cl_part_l1 = hostMemStat.cl_part_l1; // l1_tran = hostMemStat.l1_tran; return true; }
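The dominant rewrite in the pair above is the kernel launch: every triple-chevron launch, including templated kernels and launches that pass a dynamic shared-memory size and a stream, becomes a hipLaunchKernelGGL call whose first five arguments are the kernel, grid, block, shared-memory size and stream, followed by the kernel arguments. A minimal sketch under the same conventions; the kernel fill is illustrative, not taken from cutt.

#include <hip/hip_runtime.h>

template <int N>
__global__ void fill(int *out) {
  // Each thread writes the template constant to its slot.
  out[blockIdx.x * blockDim.x + threadIdx.x] = N;
}

void launch(int *d_out, hipStream_t stream) {
  // CUDA form:  fill<4><<<dim3(8), dim3(64), 0, stream>>>(d_out);
  // HIP form emitted by hipify (template instantiations are parenthesized
  // so the macro sees them as a single argument, as in countPacked<NREG> above):
  hipLaunchKernelGGL((fill<4>), dim3(8), dim3(64), 0, stream, d_out);
}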
a95808f4fcb017e3989e5843b469a4cb9073f087.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zlascl2.cu normal z -> d, Fri Sep 11 18:29:21 2015 @author Theo Mary */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( magma_ceildiv( m, NB ) ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( dlascl2_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( dlascl2_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } else if (type == MagmaFull) { hipLaunchKernelGGL(( dlascl2_full) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } } /** @see magmablas_dlascl2_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_dlascl2_q( type, m, n, dD, dA, ldda, magma_stream, info ); }
a95808f4fcb017e3989e5843b469a4cb9073f087.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zlascl2.cu normal z -> d, Fri Sep 11 18:29:21 2015 @author Theo Mary */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( magma_ceildiv( m, NB ) ); dim3 threads( NB ); if (type == MagmaLower) { dlascl2_lower <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } else if (type == MagmaUpper) { dlascl2_upper <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } else if (type == MagmaFull) { dlascl2_full <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } } /** @see magmablas_dlascl2_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_dlascl2_q( type, m, n, dD, dA, ldda, magma_stream, info ); }
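For reference, the scaling these kernels implement can be written as a few lines of host code. The sketch below is illustrative only (lascl2_reference, Shape and the 3x3 example are not part of MAGMA); it mirrors the column-major indexing A[i + j*lda] *= D[i] and the full/lower/upper column bounds used by dlascl2_full, dlascl2_lower and dlascl2_upper above.

#include <cstdio>
#include <vector>

// Host reference for the scaling performed by the dlascl2_* kernels:
// row i of the column-major m x n matrix A (leading dimension lda) is
// multiplied by D[i]; "lower"/"upper" restrict the update to the
// triangular part, as in dlascl2_lower / dlascl2_upper.
enum class Shape { Full, Lower, Upper };

void lascl2_reference(Shape shape, int m, int n,
                      const double* D, double* A, int lda) {
    for (int i = 0; i < m; ++i) {
        int jbeg = 0, jend = n;                                  // Full: whole row
        if (shape == Shape::Lower) jend = (i < n ? i + 1 : n);   // columns 0..min(i, n-1)
        if (shape == Shape::Upper) jbeg = i;                     // columns i..n-1
        for (int j = jbeg; j < jend; ++j)
            A[i + (size_t)j * lda] *= D[i];
    }
}

int main() {
    const int m = 3, n = 3, lda = m;
    std::vector<double> A = {1, 1, 1,   // column 0
                             1, 1, 1,   // column 1
                             1, 1, 1};  // column 2
    std::vector<double> D = {2.0, 3.0, 4.0};
    lascl2_reference(Shape::Lower, m, n, D.data(), A.data(), lda);
    // prints: 2.0 1.0 1.0 / 3.0 3.0 1.0 / 4.0 4.0 4.0
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) printf("%4.1f ", A[i + j * lda]);
        printf("\n");
    }
    return 0;
}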
8d3ac94a756a2530d0ad297d6876e99d2e1b712e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void factorKernel(float *w, int N)
{
    int ix  = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = ix * 2;
    int izx = N + idx;

    const float pi = 3.141592653589793238462643383;
    float aw  = (2.0 * pi) / (float)N;
    float arg = aw * (float)ix;

    /* Twiddle factors are symmetric about N/2, with a change in sign
       due to the 180 degree phase change. */
    if (idx < N)
    {
        w[idx]     = cos(arg);
        w[idx + 1] = sin(arg);
        w[izx]     = (-1) * w[idx];
        w[izx + 1] = (-1) * w[idx + 1];
    }
}
8d3ac94a756a2530d0ad297d6876e99d2e1b712e.cu
#include "includes.h" __global__ void factorKernel(float *w, int N) { int ix = blockIdx.x * blockDim.x + threadIdx.x; int idx = ix * 2; int izx = N + idx; const float pi = 3.141592653589793238462643383; float aw = (2.0 * pi) / (float)N; float arg = aw * (float)ix; /* Twiddle factors are symmetric along N/2. with change in sign, due to 180 degree phase change */ if (idx < N) { w[idx] = cos(arg); w[idx + 1] = sin(arg); w[izx] = (-1) * w[idx]; w[izx+1] = (-1) * w[idx + 1]; } }
b56303f73ce645a584b713588c999e6f1ec2deec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GridTools * * Copyright (c) 2014-2021, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include <gridtools/stencil/gpu/shared_allocator.hpp> #include <gtest/gtest.h> #include <gridtools/meta.hpp> #include <cuda_test_helper.hpp> namespace gridtools { namespace stencil { namespace gpu_backend { namespace { template <typename PtrHolder> __device__ uint64_t get_ptr(PtrHolder ptr_holder) { return reinterpret_cast<uint64_t>(ptr_holder()); } TEST(shared_allocator, alignment) { shared_allocator allocator; EXPECT_EQ(0, allocator.size()); using alloc1_t = char[14]; using alloc2_t = double; using alloc3_t = double; auto alloc1 = allocate(allocator, meta::lazy::id<alloc1_t>{}, 7); auto alloc2 = allocate(allocator, meta::lazy::id<alloc2_t>{}, 4); auto alloc3 = allocate(allocator, meta::lazy::id<alloc3_t>{}, 1); auto ptr1 = on_device::exec_with_shared_memory( allocator.size(), GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&get_ptr<decltype(alloc1)>), alloc1); auto ptr2 = on_device::exec_with_shared_memory( allocator.size(), GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&get_ptr<decltype(alloc2)>), alloc2); auto ptr3 = on_device::exec_with_shared_memory( allocator.size(), GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&get_ptr<decltype(alloc3)>), alloc3); // check alignment for all allocations EXPECT_EQ(ptr1 % alignof(alloc1_t), 0); EXPECT_EQ(ptr2 % alignof(alloc2_t), 0); EXPECT_EQ(ptr3 % alignof(alloc3_t), 0); } template <class PtrHolderFloat, class PtrHolderInt> __global__ void fill_and_check_test( PtrHolderFloat holder1, PtrHolderFloat holder1_shifted, PtrHolderInt holder2, bool *result) { static_assert(std::is_same_v<decltype(holder1()), float *>); static_assert(std::is_same_v<decltype(holder1_shifted()), float *>); static_assert(std::is_same_v<decltype(holder2()), int16_t *>); auto ptr1 = holder1(); auto ptr1_shifted = holder1_shifted(); auto ptr2 = holder2(); ptr1[threadIdx.x] = 100 * blockIdx.x + threadIdx.x; ptr1_shifted[threadIdx.x] = 10000 + 100 * blockIdx.x + threadIdx.x; ptr2[threadIdx.x] = 20000 + 100 * blockIdx.x + threadIdx.x; __syncthreads(); if (threadIdx.x == 0) { bool local_result = true; for (int i = 0; i < 32; ++i) { local_result &= (ptr1[i] == 100 * blockIdx.x + i && ptr1[i + 32] == 10000 + 100 * blockIdx.x + i && ptr2[i] == 20000 + 100 * blockIdx.x + i); } result[blockIdx.x] = local_result; } } TEST(shared_allocator, fill_and_check) { shared_allocator allocator; auto float_ptr = allocate(allocator, meta::lazy::id<float>{}, 64); auto int_ptr = allocate(allocator, meta::lazy::id<int16_t>{}, 32); bool *result; GT_CUDA_CHECK(hipMallocManaged(&result, 2 * sizeof(bool))); hipLaunchKernelGGL(( fill_and_check_test), dim3(2), dim3(32), allocator.size(), 0, float_ptr, (float_ptr + 48) + (-16), int_ptr, result); GT_CUDA_CHECK(hipDeviceSynchronize()); EXPECT_TRUE(result[0]); EXPECT_TRUE(result[1]); GT_CUDA_CHECK(hipFree(result)); } } // namespace } // namespace gpu_backend } // namespace stencil } // namespace gridtools
b56303f73ce645a584b713588c999e6f1ec2deec.cu
/* * GridTools * * Copyright (c) 2014-2021, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include <gridtools/stencil/gpu/shared_allocator.hpp> #include <gtest/gtest.h> #include <gridtools/meta.hpp> #include <cuda_test_helper.hpp> namespace gridtools { namespace stencil { namespace gpu_backend { namespace { template <typename PtrHolder> __device__ uint64_t get_ptr(PtrHolder ptr_holder) { return reinterpret_cast<uint64_t>(ptr_holder()); } TEST(shared_allocator, alignment) { shared_allocator allocator; EXPECT_EQ(0, allocator.size()); using alloc1_t = char[14]; using alloc2_t = double; using alloc3_t = double; auto alloc1 = allocate(allocator, meta::lazy::id<alloc1_t>{}, 7); auto alloc2 = allocate(allocator, meta::lazy::id<alloc2_t>{}, 4); auto alloc3 = allocate(allocator, meta::lazy::id<alloc3_t>{}, 1); auto ptr1 = on_device::exec_with_shared_memory( allocator.size(), GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&get_ptr<decltype(alloc1)>), alloc1); auto ptr2 = on_device::exec_with_shared_memory( allocator.size(), GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&get_ptr<decltype(alloc2)>), alloc2); auto ptr3 = on_device::exec_with_shared_memory( allocator.size(), GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&get_ptr<decltype(alloc3)>), alloc3); // check alignment for all allocations EXPECT_EQ(ptr1 % alignof(alloc1_t), 0); EXPECT_EQ(ptr2 % alignof(alloc2_t), 0); EXPECT_EQ(ptr3 % alignof(alloc3_t), 0); } template <class PtrHolderFloat, class PtrHolderInt> __global__ void fill_and_check_test( PtrHolderFloat holder1, PtrHolderFloat holder1_shifted, PtrHolderInt holder2, bool *result) { static_assert(std::is_same_v<decltype(holder1()), float *>); static_assert(std::is_same_v<decltype(holder1_shifted()), float *>); static_assert(std::is_same_v<decltype(holder2()), int16_t *>); auto ptr1 = holder1(); auto ptr1_shifted = holder1_shifted(); auto ptr2 = holder2(); ptr1[threadIdx.x] = 100 * blockIdx.x + threadIdx.x; ptr1_shifted[threadIdx.x] = 10000 + 100 * blockIdx.x + threadIdx.x; ptr2[threadIdx.x] = 20000 + 100 * blockIdx.x + threadIdx.x; __syncthreads(); if (threadIdx.x == 0) { bool local_result = true; for (int i = 0; i < 32; ++i) { local_result &= (ptr1[i] == 100 * blockIdx.x + i && ptr1[i + 32] == 10000 + 100 * blockIdx.x + i && ptr2[i] == 20000 + 100 * blockIdx.x + i); } result[blockIdx.x] = local_result; } } TEST(shared_allocator, fill_and_check) { shared_allocator allocator; auto float_ptr = allocate(allocator, meta::lazy::id<float>{}, 64); auto int_ptr = allocate(allocator, meta::lazy::id<int16_t>{}, 32); bool *result; GT_CUDA_CHECK(cudaMallocManaged(&result, 2 * sizeof(bool))); fill_and_check_test<<<2, 32, allocator.size()>>>( float_ptr, (float_ptr + 48) + (-16), int_ptr, result); GT_CUDA_CHECK(cudaDeviceSynchronize()); EXPECT_TRUE(result[0]); EXPECT_TRUE(result[1]); GT_CUDA_CHECK(cudaFree(result)); } } // namespace } // namespace gpu_backend } // namespace stencil } // namespace gridtools
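The tests above show the whole shared_allocator workflow: allocate() records a typed block and returns a pointer holder, allocator.size() reports the dynamic shared-memory bytes the launch must request, and the holder is only dereferenced on the device. The sketch below is a hypothetical condensed example built solely from those calls; scratch_kernel and scratch_sketch are illustrative names, and it is placed inside the same namespaces the test opens so the unqualified names resolve as they do above.

#include <gridtools/stencil/gpu/shared_allocator.hpp>
#include <gridtools/meta.hpp>
#include <cstdio>
#include <cuda_runtime.h>

namespace gridtools {
    namespace stencil {
        namespace gpu_backend {
            // Each block gets a 32-float scratch buffer carved out of the dynamic
            // shared memory managed by shared_allocator.
            template <class PtrHolder>
            __global__ void scratch_kernel(PtrHolder holder) {
                float *scratch = holder();        // resolved per block on the device
                scratch[threadIdx.x] = threadIdx.x;
                __syncthreads();
                if (threadIdx.x == 0)
                    printf("block %d: scratch[31] = %f\n", (int)blockIdx.x, scratch[31]);
            }

            void scratch_sketch() {
                shared_allocator allocator;
                auto scratch = allocate(allocator, meta::lazy::id<float>{}, 32);
                // allocator.size() is the dynamic shared-memory size the launch must
                // request, exactly as in the fill_and_check test above.
                scratch_kernel<<<2, 32, allocator.size()>>>(scratch);
                cudaDeviceSynchronize();
            }
        } // namespace gpu_backend
    } // namespace stencil
} // namespace gridtools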
274304928de928581cf2f0841daa23535121fe49.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/column_utilities.hpp> #include <tests/utilities/column_wrapper.hpp> #include <tests/utilities/cudf_gtest.hpp> #include <tests/utilities/type_lists.hpp> #include <cudf/io/functions.hpp> #include <cudf/io/data_sink.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/concatenate.hpp> #include <fstream> #include <type_traits> namespace cudf_io = cudf::experimental::io; template <typename T> using column_wrapper = typename std::conditional<std::is_same<T, cudf::string_view>::value, cudf::test::strings_column_wrapper, cudf::test::fixed_width_column_wrapper<T>>::type; using column = cudf::column; using table = cudf::experimental::table; using table_view = cudf::table_view; // Global environment for temporary files auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>( ::testing::AddGlobalTestEnvironment( new cudf::test::TempDirTestEnvironment)); template<typename T, typename Elements> std::unique_ptr<cudf::experimental::table> create_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity, Elements elements) { auto valids = cudf::test::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0 ? 
true : false; } ); std::vector<cudf::test::fixed_width_column_wrapper<T>> src_cols(num_columns); for(int idx=0; idx<num_columns; idx++){ if(include_validity){ src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows, valids); } else { src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows); } } std::vector<std::unique_ptr<cudf::column>> columns(num_columns); std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](cudf::test::fixed_width_column_wrapper<T> &in){ auto ret = in.release(); ret->has_nulls(); return ret; }); return std::make_unique<cudf::experimental::table>(std::move(columns)); } template<typename T> std::unique_ptr<cudf::experimental::table> create_random_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity) { auto rand_elements = cudf::test::make_counting_transform_iterator(0, [](T i){return rand();}); return create_fixed_table<T>(num_columns, num_rows, include_validity, rand_elements); } template<typename T> std::unique_ptr<cudf::experimental::table> create_compressible_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, cudf::size_type period, bool include_validity) { auto compressible_elements = cudf::test::make_counting_transform_iterator(0, [period](T i){ return i / period; }); return create_fixed_table<T>(num_columns, num_rows, include_validity, compressible_elements); } // Base test fixture for tests struct ParquetWriterTest : public cudf::test::BaseFixture {}; // Base test fixture for "stress" tests struct ParquetWriterStressTest : public cudf::test::BaseFixture {}; // Typed test fixture for numeric type tests template <typename T> struct ParquetWriterNumericTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; // Typed test fixture for timestamp type tests template <typename T> struct ParquetWriterTimestampTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetWriterNumericTypeTest, cudf::test::NumericTypes); using SupportedTimestampTypes = cudf::test::TimestampTypes; TYPED_TEST_CASE(ParquetWriterTimestampTypeTest, SupportedTimestampTypes); // Base test fixture for chunked writer tests struct ParquetChunkedWriterTest : public cudf::test::BaseFixture {}; // Typed test fixture for numeric type tests template <typename T> struct ParquetChunkedWriterNumericTypeTest : public ParquetChunkedWriterTest { auto type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetChunkedWriterNumericTypeTest, cudf::test::NumericTypes); namespace { // Generates a vector of uniform random values of type T template <typename T> inline auto random_values(size_t size) { std::vector<T> values(size); using T1 = T; using uniform_distribution = typename std::conditional_t< std::is_same<T1, bool>::value, std::bernoulli_distribution, std::conditional_t<std::is_floating_point<T1>::value, std::uniform_real_distribution<T1>, std::uniform_int_distribution<T1>>>; static constexpr auto seed = 0xf00d; static std::mt19937 engine{seed}; static uniform_distribution dist{}; std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; }); return values; } // Helper function to compare two tables void expect_tables_equal(cudf::table_view const& lhs, cudf::table_view const& rhs) { EXPECT_EQ(lhs.num_columns(), rhs.num_columns()); auto 
expected = lhs.begin(); auto result = rhs.begin(); while (result != rhs.end()) { cudf::test::expect_columns_equal(*expected++, *result++); } } } // namespace TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumn) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumn.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumnWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i % 2); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, Timestamps) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Timestamps.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, TimestampsWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i > 30) && (i < 60); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = 
temp_env->get_temp_filepath("TimestampsWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TEST_F(ParquetWriterTest, MultiColumn) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); // column_wrapper<bool> col0{ // col0_data.begin(), col0_data.end(), validity}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), validity}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity}; cudf_io::table_metadata expected_metadata; //expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumn.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, MultiColumnWithNulls) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); // auto col0_mask = cudf::test::make_counting_transform_iterator( // 0, [](auto i) { return (i % 2); }); auto col1_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i < 10); }); auto col2_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); auto col3_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i == (num_rows - 1)); }); auto col4_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i >= 40 || i <= 60); }); auto col5_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i > 80); }); // column_wrapper<bool> col0{ // col0_data.begin(), 
col0_data.end(), col0_mask}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), col1_mask}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), col2_mask}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), col3_mask}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), col4_mask}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), col5_mask}; cudf_io::table_metadata expected_metadata; //expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, Strings) { std::vector<const char*> strings{"Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"}; const auto num_rows = strings.size(); auto seq_col0 = random_values<int>(num_rows); auto seq_col2 = random_values<float>(num_rows); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); column_wrapper<int> col0{seq_col0.begin(), seq_col0.end(), validity}; column_wrapper<cudf::string_view> col1{strings.begin(), strings.end()}; column_wrapper<float> col2{seq_col2.begin(), seq_col2.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); expected_metadata.column_names.emplace_back("col_string"); expected_metadata.column_names.emplace_back("col_another"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(3, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Strings.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, HostBuffer) { constexpr auto num_rows = 100 << 10; const auto seq_col = random_values<int>(num_rows); const auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); column_wrapper<int> col{seq_col.begin(), seq_col.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); 
const auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); std::vector<char> out_buffer; cudf_io::write_parquet_args out_args{cudf_io::sink_info(&out_buffer), expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info(out_buffer.data(), out_buffer.size())}; const auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, NonNullable) { srand(31337); auto expected = create_random_fixed_table<int>(9, 9, false); auto filepath = temp_env->get_temp_filepath("NonNullable.parquet"); cudf_io::write_parquet_args args{cudf_io::sink_info{filepath}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom data sink that supports device writes. uses plain file io. class custom_test_data_sink : public cudf::io::data_sink { public: explicit custom_test_data_sink(std::string const& filepath){ outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file"); } virtual ~custom_test_data_sink() { flush(); } void host_write(void const* data, size_t size) override { outfile_.write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return true; } void device_write(void const* gpu_data, size_t size, hipStream_t stream){ char *ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); CUDA_TRY(hipMemcpyAsync(ptr, gpu_data, size, hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); outfile_.write(reinterpret_cast<char const*>(ptr), size); hipHostFree(ptr); } void flush() override { outfile_.flush(); } size_t bytes_written() override { return outfile_.tellp(); } private: std::ofstream outfile_; }; TEST_F(ParquetWriterTest, CustomDataSink) { auto filepath = temp_env->get_temp_filepath("CustomDataSink.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::experimental::io; srand(31337); auto expected = create_random_fixed_table<int>(5, 10, false); // write out using the custom sink { cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); } // write out using a memmapped sink std::vector<char> buf_sink; { cudf_io::write_parquet_args args{cudf_io::sink_info{&buf_sink}, *expected}; cudf_io::write_parquet(args); } // read them back in and make sure everything matches cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); cudf_io::read_parquet_args buf_args{cudf_io::source_info{buf_sink.data(), buf_sink.size()}}; auto buf_tbl = cudf_io::read_parquet(buf_args); expect_tables_equal(buf_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterTest, DeviceWriteLargeishFile) { auto filepath = temp_env->get_temp_filepath("DeviceWriteLargeishFile.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(4, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args 
args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetChunkedWriterTest, SingleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto filepath = temp_env->get_temp_filepath("ChunkedSingle.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *table1); } TEST_F(ParquetChunkedWriterTest, SimpleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto table2 = create_random_fixed_table<int>(5, 5, true); auto full_table = cudf::experimental::concatenate({*table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedSimple.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, LargeTables) { srand(31337); auto table1 = create_random_fixed_table<int>(512, 4096, true); auto table2 = create_random_fixed_table<int>(512, 8192, true); auto full_table = cudf::experimental::concatenate({*table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedLarge.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, ManyTables) { srand(31337); std::vector<std::unique_ptr<table>> tables; std::vector<table_view> table_views; constexpr int num_tables = 96; for(int idx=0; idx<num_tables; idx++){ auto tbl = create_random_fixed_table<int>(16, 64, true); table_views.push_back(*tbl); tables.push_back(std::move(tbl)); } auto expected = cudf::experimental::concatenate(table_views); auto filepath = temp_env->get_temp_filepath("ChunkedManyTables.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); std::for_each(table_views.begin(), table_views.end(), [&state](table_view const& tbl){ cudf_io::write_parquet_chunked(tbl, state); }); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, Strings) { std::vector<std::unique_ptr<cudf::column>> cols; bool mask1[] = { 1, 1, 0, 1, 1, 1, 1 }; std::vector<const char*> h_strings1 { "four", "score", "and", "seven", "years", "ago", "abcdefgh" }; 
cudf::test::strings_column_wrapper strings1( h_strings1.begin(), h_strings1.end(), mask1 ); cols.push_back(strings1.release()); cudf::experimental::table tbl1(std::move(cols)); bool mask2[] = { 0, 1, 1, 1, 1, 1, 1 }; std::vector<const char*> h_strings2 { "ooooo", "ppppppp", "fff", "j", "cccc", "bbb", "zzzzzzzzzzz" }; cudf::test::strings_column_wrapper strings2( h_strings2.begin(), h_strings2.end(), mask2 ); cols.push_back(strings2.release()); cudf::experimental::table tbl2(std::move(cols)); auto expected = cudf::experimental::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedStrings.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, MismatchedTypes) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = create_random_fixed_table<float>(4, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedTypes.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TEST_F(ParquetChunkedWriterTest, MismatchedStructure) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = create_random_fixed_table<float>(3, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedStructure.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize) { // write out two 31 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 31; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; T c1a[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }; T c1b[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::experimental::table tbl1(std::move(cols)); T c2a[] = { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; T c2b[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::experimental::table tbl2(std::move(cols)); auto expected = cudf::experimental::concatenate({tbl1, tbl2}); auto filepath = 
temp_env->get_temp_filepath("ChunkedUnalignedSize.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize2) { // write out two 33 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 33; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; T c1a[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }; T c1b[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::experimental::table tbl1(std::move(cols)); T c2a[] = { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; T c2b[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::experimental::table tbl2(std::move(cols)); auto expected = cudf::experimental::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize2.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom mem mapped data sink that supports device writes template<bool supports_device_writes> class custom_test_memmap_sink : public cudf::io::data_sink { public: explicit custom_test_memmap_sink(std::vector<char>* mm_writer_buf){ mm_writer = cudf::io::data_sink::create(mm_writer_buf); } virtual ~custom_test_memmap_sink() { mm_writer->flush(); } void host_write(void const* data, size_t size) override { mm_writer->host_write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return supports_device_writes; } void device_write(void const* gpu_data, size_t size, hipStream_t stream){ char *ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); CUDA_TRY(hipMemcpyAsync(ptr, gpu_data, size, hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); mm_writer->host_write(reinterpret_cast<char const*>(ptr), size); hipHostFree(ptr); } void flush() override { mm_writer->flush(); } size_t bytes_written() override { return mm_writer->bytes_written(); } private: std::unique_ptr<data_sink> mm_writer; }; TEST_F(ParquetWriterStressTest, LargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> 
custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } 
TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); }
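Stripped of fixtures and assertions, the write/read round trip exercised throughout this file reduces to a handful of calls. The function below is an illustrative sketch (parquet_round_trip_sketch and the "ints" column are invented for this example) that uses only the experimental cudf::experimental::io entry points the tests above call: write_parquet_args/write_parquet with an in-memory sink_info, then read_parquet_args/read_parquet from the same buffer, as in the HostBuffer test.

#include <tests/utilities/column_wrapper.hpp>
#include <cudf/io/functions.hpp>
#include <cudf/table/table.hpp>
#include <memory>
#include <vector>

void parquet_round_trip_sketch() {
  namespace cudf_io = cudf::experimental::io;

  // One int32 column with five rows, built the same way the tests above build columns.
  std::vector<int> data{1, 2, 3, 4, 5};
  cudf::test::fixed_width_column_wrapper<int> col(data.begin(), data.end());
  std::vector<std::unique_ptr<cudf::column>> cols;
  cols.push_back(col.release());
  cudf::experimental::table input(std::move(cols));

  cudf_io::table_metadata metadata;
  metadata.column_names.emplace_back("ints");

  // Write to an in-memory buffer instead of a file (cf. the HostBuffer test).
  std::vector<char> buffer;
  cudf_io::write_parquet_args out_args{cudf_io::sink_info(&buffer), input.view(), &metadata};
  cudf_io::write_parquet(out_args);

  // Read it back; result.tbl owns the round-tripped table.
  cudf_io::read_parquet_args in_args{cudf_io::source_info(buffer.data(), buffer.size())};
  auto result = cudf_io::read_parquet(in_args);
}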
274304928de928581cf2f0841daa23535121fe49.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/column_utilities.hpp> #include <tests/utilities/column_wrapper.hpp> #include <tests/utilities/cudf_gtest.hpp> #include <tests/utilities/type_lists.hpp> #include <cudf/io/functions.hpp> #include <cudf/io/data_sink.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/concatenate.hpp> #include <fstream> #include <type_traits> namespace cudf_io = cudf::experimental::io; template <typename T> using column_wrapper = typename std::conditional<std::is_same<T, cudf::string_view>::value, cudf::test::strings_column_wrapper, cudf::test::fixed_width_column_wrapper<T>>::type; using column = cudf::column; using table = cudf::experimental::table; using table_view = cudf::table_view; // Global environment for temporary files auto const temp_env = static_cast<cudf::test::TempDirTestEnvironment*>( ::testing::AddGlobalTestEnvironment( new cudf::test::TempDirTestEnvironment)); template<typename T, typename Elements> std::unique_ptr<cudf::experimental::table> create_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity, Elements elements) { auto valids = cudf::test::make_counting_transform_iterator(0, [](auto i) { return i % 2 == 0 ? 
true : false; } ); std::vector<cudf::test::fixed_width_column_wrapper<T>> src_cols(num_columns); for(int idx=0; idx<num_columns; idx++){ if(include_validity){ src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows, valids); } else { src_cols[idx] = cudf::test::fixed_width_column_wrapper<T>(elements, elements + num_rows); } } std::vector<std::unique_ptr<cudf::column>> columns(num_columns); std::transform(src_cols.begin(), src_cols.end(), columns.begin(), [](cudf::test::fixed_width_column_wrapper<T> &in){ auto ret = in.release(); ret->has_nulls(); return ret; }); return std::make_unique<cudf::experimental::table>(std::move(columns)); } template<typename T> std::unique_ptr<cudf::experimental::table> create_random_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, bool include_validity) { auto rand_elements = cudf::test::make_counting_transform_iterator(0, [](T i){return rand();}); return create_fixed_table<T>(num_columns, num_rows, include_validity, rand_elements); } template<typename T> std::unique_ptr<cudf::experimental::table> create_compressible_fixed_table(cudf::size_type num_columns, cudf::size_type num_rows, cudf::size_type period, bool include_validity) { auto compressible_elements = cudf::test::make_counting_transform_iterator(0, [period](T i){ return i / period; }); return create_fixed_table<T>(num_columns, num_rows, include_validity, compressible_elements); } // Base test fixture for tests struct ParquetWriterTest : public cudf::test::BaseFixture {}; // Base test fixture for "stress" tests struct ParquetWriterStressTest : public cudf::test::BaseFixture {}; // Typed test fixture for numeric type tests template <typename T> struct ParquetWriterNumericTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; // Typed test fixture for timestamp type tests template <typename T> struct ParquetWriterTimestampTypeTest : public ParquetWriterTest { auto type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetWriterNumericTypeTest, cudf::test::NumericTypes); using SupportedTimestampTypes = cudf::test::TimestampTypes; TYPED_TEST_CASE(ParquetWriterTimestampTypeTest, SupportedTimestampTypes); // Base test fixture for chunked writer tests struct ParquetChunkedWriterTest : public cudf::test::BaseFixture {}; // Typed test fixture for numeric type tests template <typename T> struct ParquetChunkedWriterNumericTypeTest : public ParquetChunkedWriterTest { auto type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; // Declare typed test cases TYPED_TEST_CASE(ParquetChunkedWriterNumericTypeTest, cudf::test::NumericTypes); namespace { // Generates a vector of uniform random values of type T template <typename T> inline auto random_values(size_t size) { std::vector<T> values(size); using T1 = T; using uniform_distribution = typename std::conditional_t< std::is_same<T1, bool>::value, std::bernoulli_distribution, std::conditional_t<std::is_floating_point<T1>::value, std::uniform_real_distribution<T1>, std::uniform_int_distribution<T1>>>; static constexpr auto seed = 0xf00d; static std::mt19937 engine{seed}; static uniform_distribution dist{}; std::generate_n(values.begin(), size, [&]() { return T{dist(engine)}; }); return values; } // Helper function to compare two tables void expect_tables_equal(cudf::table_view const& lhs, cudf::table_view const& rhs) { EXPECT_EQ(lhs.num_columns(), rhs.num_columns()); auto 
expected = lhs.begin(); auto result = rhs.begin(); while (result != rhs.end()) { cudf::test::expect_columns_equal(*expected++, *result++); } } } // namespace TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumn) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumn.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterNumericTypeTest, SingleColumnWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam(i); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i % 2); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("SingleColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, Timestamps) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Timestamps.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TYPED_TEST(ParquetWriterTimestampTypeTest, TimestampsWithNulls) { auto sequence = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return TypeParam((std::rand() / 10000) * 1000); }); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i > 30) && (i < 60); }); constexpr auto num_rows = 100; column_wrapper<TypeParam> col(sequence, sequence + num_rows, validity); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); auto filepath = 
temp_env->get_temp_filepath("TimestampsWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view()}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; in_args.timestamp_type = this->type(); auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); } TEST_F(ParquetWriterTest, MultiColumn) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); // column_wrapper<bool> col0{ // col0_data.begin(), col0_data.end(), validity}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), validity}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), validity}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), validity}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), validity}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), validity}; cudf_io::table_metadata expected_metadata; //expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumn.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, MultiColumnWithNulls) { constexpr auto num_rows = 100; // auto col0_data = random_values<bool>(num_rows); auto col1_data = random_values<int8_t>(num_rows); auto col2_data = random_values<int16_t>(num_rows); auto col3_data = random_values<int32_t>(num_rows); auto col4_data = random_values<float>(num_rows); auto col5_data = random_values<double>(num_rows); // auto col0_mask = cudf::test::make_counting_transform_iterator( // 0, [](auto i) { return (i % 2); }); auto col1_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i < 10); }); auto col2_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); auto col3_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i == (num_rows - 1)); }); auto col4_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i >= 40 || i <= 60); }); auto col5_mask = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return (i > 80); }); // column_wrapper<bool> col0{ // col0_data.begin(), 
col0_data.end(), col0_mask}; column_wrapper<int8_t> col1{col1_data.begin(), col1_data.end(), col1_mask}; column_wrapper<int16_t> col2{col2_data.begin(), col2_data.end(), col2_mask}; column_wrapper<int32_t> col3{col3_data.begin(), col3_data.end(), col3_mask}; column_wrapper<float> col4{col4_data.begin(), col4_data.end(), col4_mask}; column_wrapper<double> col5{col5_data.begin(), col5_data.end(), col5_mask}; cudf_io::table_metadata expected_metadata; //expected_metadata.column_names.emplace_back("bools"); expected_metadata.column_names.emplace_back("int8s"); expected_metadata.column_names.emplace_back("int16s"); expected_metadata.column_names.emplace_back("int32s"); expected_metadata.column_names.emplace_back("floats"); expected_metadata.column_names.emplace_back("doubles"); std::vector<std::unique_ptr<column>> cols; // cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); cols.push_back(col3.release()); cols.push_back(col4.release()); cols.push_back(col5.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(5, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("MultiColumnWithNulls.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, Strings) { std::vector<const char*> strings{"Monday", "Monday", "Friday", "Monday", "Friday", "Friday", "Friday", "Funday"}; const auto num_rows = strings.size(); auto seq_col0 = random_values<int>(num_rows); auto seq_col2 = random_values<float>(num_rows); auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); column_wrapper<int> col0{seq_col0.begin(), seq_col0.end(), validity}; column_wrapper<cudf::string_view> col1{strings.begin(), strings.end()}; column_wrapper<float> col2{seq_col2.begin(), seq_col2.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); expected_metadata.column_names.emplace_back("col_string"); expected_metadata.column_names.emplace_back("col_another"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col0.release()); cols.push_back(col1.release()); cols.push_back(col2.release()); auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(3, expected->num_columns()); auto filepath = temp_env->get_temp_filepath("Strings.parquet"); cudf_io::write_parquet_args out_args{cudf_io::sink_info{filepath}, expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, HostBuffer) { constexpr auto num_rows = 100 << 10; const auto seq_col = random_values<int>(num_rows); const auto validity = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return true; }); column_wrapper<int> col{seq_col.begin(), seq_col.end(), validity}; cudf_io::table_metadata expected_metadata; expected_metadata.column_names.emplace_back("col_other"); std::vector<std::unique_ptr<column>> cols; cols.push_back(col.release()); 
const auto expected = std::make_unique<table>(std::move(cols)); EXPECT_EQ(1, expected->num_columns()); std::vector<char> out_buffer; cudf_io::write_parquet_args out_args{cudf_io::sink_info(&out_buffer), expected->view(), &expected_metadata}; cudf_io::write_parquet(out_args); cudf_io::read_parquet_args in_args{cudf_io::source_info(out_buffer.data(), out_buffer.size())}; const auto result = cudf_io::read_parquet(in_args); expect_tables_equal(expected->view(), result.tbl->view()); EXPECT_EQ(expected_metadata.column_names, result.metadata.column_names); } TEST_F(ParquetWriterTest, NonNullable) { srand(31337); auto expected = create_random_fixed_table<int>(9, 9, false); auto filepath = temp_env->get_temp_filepath("NonNullable.parquet"); cudf_io::write_parquet_args args{cudf_io::sink_info{filepath}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom data sink that supports device writes. uses plain file io. class custom_test_data_sink : public cudf::io::data_sink { public: explicit custom_test_data_sink(std::string const& filepath){ outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file"); } virtual ~custom_test_data_sink() { flush(); } void host_write(void const* data, size_t size) override { outfile_.write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return true; } void device_write(void const* gpu_data, size_t size, cudaStream_t stream){ char *ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); CUDA_TRY(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); outfile_.write(reinterpret_cast<char const*>(ptr), size); cudaFreeHost(ptr); } void flush() override { outfile_.flush(); } size_t bytes_written() override { return outfile_.tellp(); } private: std::ofstream outfile_; }; TEST_F(ParquetWriterTest, CustomDataSink) { auto filepath = temp_env->get_temp_filepath("CustomDataSink.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::experimental::io; srand(31337); auto expected = create_random_fixed_table<int>(5, 10, false); // write out using the custom sink { cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); } // write out using a memmapped sink std::vector<char> buf_sink; { cudf_io::write_parquet_args args{cudf_io::sink_info{&buf_sink}, *expected}; cudf_io::write_parquet(args); } // read them back in and make sure everything matches cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); cudf_io::read_parquet_args buf_args{cudf_io::source_info{buf_sink.data(), buf_sink.size()}}; auto buf_tbl = cudf_io::read_parquet(buf_args); expect_tables_equal(buf_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterTest, DeviceWriteLargeishFile) { auto filepath = temp_env->get_temp_filepath("DeviceWriteLargeishFile.parquet"); custom_test_data_sink custom_sink(filepath); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(4, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args 
args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{filepath}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetChunkedWriterTest, SingleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto filepath = temp_env->get_temp_filepath("ChunkedSingle.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *table1); } TEST_F(ParquetChunkedWriterTest, SimpleTable) { srand(31337); auto table1 = create_random_fixed_table<int>(5, 5, true); auto table2 = create_random_fixed_table<int>(5, 5, true); auto full_table = cudf::experimental::concatenate({*table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedSimple.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, LargeTables) { srand(31337); auto table1 = create_random_fixed_table<int>(512, 4096, true); auto table2 = create_random_fixed_table<int>(512, 8192, true); auto full_table = cudf::experimental::concatenate({*table1, *table2}); auto filepath = temp_env->get_temp_filepath("ChunkedLarge.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); cudf_io::write_parquet_chunked(*table2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *full_table); } TEST_F(ParquetChunkedWriterTest, ManyTables) { srand(31337); std::vector<std::unique_ptr<table>> tables; std::vector<table_view> table_views; constexpr int num_tables = 96; for(int idx=0; idx<num_tables; idx++){ auto tbl = create_random_fixed_table<int>(16, 64, true); table_views.push_back(*tbl); tables.push_back(std::move(tbl)); } auto expected = cudf::experimental::concatenate(table_views); auto filepath = temp_env->get_temp_filepath("ChunkedManyTables.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); std::for_each(table_views.begin(), table_views.end(), [&state](table_view const& tbl){ cudf_io::write_parquet_chunked(tbl, state); }); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, Strings) { std::vector<std::unique_ptr<cudf::column>> cols; bool mask1[] = { 1, 1, 0, 1, 1, 1, 1 }; std::vector<const char*> h_strings1 { "four", "score", "and", "seven", "years", "ago", "abcdefgh" }; 
cudf::test::strings_column_wrapper strings1( h_strings1.begin(), h_strings1.end(), mask1 ); cols.push_back(strings1.release()); cudf::experimental::table tbl1(std::move(cols)); bool mask2[] = { 0, 1, 1, 1, 1, 1, 1 }; std::vector<const char*> h_strings2 { "ooooo", "ppppppp", "fff", "j", "cccc", "bbb", "zzzzzzzzzzz" }; cudf::test::strings_column_wrapper strings2( h_strings2.begin(), h_strings2.end(), mask2 ); cols.push_back(strings2.release()); cudf::experimental::table tbl2(std::move(cols)); auto expected = cudf::experimental::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedStrings.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TEST_F(ParquetChunkedWriterTest, MismatchedTypes) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = create_random_fixed_table<float>(4, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedTypes.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TEST_F(ParquetChunkedWriterTest, MismatchedStructure) { srand(31337); auto table1 = create_random_fixed_table<int>(4, 4, true); auto table2 = create_random_fixed_table<float>(3, 4, true); auto filepath = temp_env->get_temp_filepath("ChunkedMismatchedStructure.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(*table1, state); EXPECT_THROW(cudf_io::write_parquet_chunked(*table2, state), cudf::logic_error); cudf_io::write_parquet_chunked_end(state); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize) { // write out two 31 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 31; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; T c1a[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }; T c1b[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::experimental::table tbl1(std::move(cols)); T c2a[] = { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; T c2b[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::experimental::table tbl2(std::move(cols)); auto expected = cudf::experimental::concatenate({tbl1, tbl2}); auto filepath = 
temp_env->get_temp_filepath("ChunkedUnalignedSize.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } TYPED_TEST(ParquetChunkedWriterNumericTypeTest, UnalignedSize2) { // write out two 33 row tables and make sure they get // read back with all their validity bits in the right place using T = TypeParam; int num_els = 33; std::vector<std::unique_ptr<cudf::column>> cols; bool mask[] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; T c1a[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }; T c1b[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }; column_wrapper<T> c1a_w(c1a, c1a + num_els, mask); column_wrapper<T> c1b_w(c1b, c1b + num_els, mask); cols.push_back(c1a_w.release()); cols.push_back(c1b_w.release()); cudf::experimental::table tbl1(std::move(cols)); T c2a[] = { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; T c2b[] = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; column_wrapper<T> c2a_w(c2a, c2a + num_els, mask); column_wrapper<T> c2b_w(c2b, c2b + num_els, mask); cols.push_back(c2a_w.release()); cols.push_back(c2b_w.release()); cudf::experimental::table tbl2(std::move(cols)); auto expected = cudf::experimental::concatenate({tbl1, tbl2}); auto filepath = temp_env->get_temp_filepath("ChunkedUnalignedSize2.parquet"); cudf_io::write_parquet_chunked_args args{cudf_io::sink_info{filepath}}; auto state = cudf_io::write_parquet_chunked_begin(args); cudf_io::write_parquet_chunked(tbl1, state); cudf_io::write_parquet_chunked(tbl2, state); cudf_io::write_parquet_chunked_end(state); cudf_io::read_parquet_args read_args{cudf_io::source_info{filepath}}; auto result = cudf_io::read_parquet(read_args); expect_tables_equal(*result.tbl, *expected); } // custom mem mapped data sink that supports device writes template<bool supports_device_writes> class custom_test_memmap_sink : public cudf::io::data_sink { public: explicit custom_test_memmap_sink(std::vector<char>* mm_writer_buf){ mm_writer = cudf::io::data_sink::create(mm_writer_buf); } virtual ~custom_test_memmap_sink() { mm_writer->flush(); } void host_write(void const* data, size_t size) override { mm_writer->host_write(reinterpret_cast<char const*>(data), size); } bool supports_device_write() const override { return supports_device_writes; } void device_write(void const* gpu_data, size_t size, cudaStream_t stream){ char *ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); CUDA_TRY(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); mm_writer->host_write(reinterpret_cast<char const*>(ptr), size); cudaFreeHost(ptr); } void flush() override { mm_writer->flush(); } size_t bytes_written() override { return mm_writer->bytes_written(); } private: std::unique_ptr<data_sink> mm_writer; }; TEST_F(ParquetWriterStressTest, LargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> 
custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, LargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<false> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWeakCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_random_fixed_table<int>(16, 4 * 1024 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableGoodCompression) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 128 * 1024, false); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); } 
TEST_F(ParquetWriterStressTest, DeviceWriteLargeTableWithValids) { std::vector<char> mm_buf; mm_buf.reserve(4 * 1024 * 1024 * 16); custom_test_memmap_sink<true> custom_sink(&mm_buf); namespace cudf_io = cudf::experimental::io; // exercises multiple rowgroups srand(31337); auto expected = create_compressible_fixed_table<int>(16, 4 * 1024 * 1024, 6, true); // write out using the custom sink (which uses device writes) cudf_io::write_parquet_args args{cudf_io::sink_info{&custom_sink}, *expected}; cudf_io::write_parquet(args); cudf_io::read_parquet_args custom_args{cudf_io::source_info{mm_buf.data(), mm_buf.size()}}; auto custom_tbl = cudf_io::read_parquet(custom_args); expect_tables_equal(custom_tbl.tbl->view(), expected->view()); }
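Editor's note: the custom data sinks in the test file above implement device_write() by staging GPU data through pinned host memory (cudaMallocHost, cudaMemcpyAsync, cudaStreamSynchronize) before handing it to a host-only writer. Below is a minimal standalone sketch of that staging pattern; the helper name, the templated writer callback and the error handling are illustrative assumptions, not code from the cudf tests.

// Sketch only (assumed helper, not part of the test file): stage device data
// through pinned host memory before handing it to a host-side writer, the same
// pattern used by the device_write() implementations above.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

static void check(cudaError_t err, const char* what) {
    if (err != cudaSuccess) {
        std::fprintf(stderr, "%s: %s\n", what, cudaGetErrorString(err));
        std::exit(1);
    }
}

// Copies `size` bytes from `gpu_data` into a pinned staging buffer on `stream`,
// then passes the host pointer to `host_write` (any host-only file/buffer writer).
template <typename HostWriter>
void staged_device_write(void const* gpu_data, size_t size, cudaStream_t stream,
                         HostWriter&& host_write) {
    char* ptr = nullptr;
    check(cudaMallocHost(&ptr, size), "cudaMallocHost");            // pinned staging buffer
    check(cudaMemcpyAsync(ptr, gpu_data, size, cudaMemcpyDeviceToHost, stream),
          "cudaMemcpyAsync");
    check(cudaStreamSynchronize(stream), "cudaStreamSynchronize");  // copy is now complete
    host_write(ptr, size);                                          // safe to consume on the host
    check(cudaFreeHost(ptr), "cudaFreeHost");
}

The synchronize before host_write is what makes the staged data safe to touch on the host; a fully asynchronous variant would need events or stream callbacks instead.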
20afb540671fa1101b42b16c488221e3cf8eab2b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "task_maskinactive.cuh"

template<typename T>
__global__ void d_maskInactiveGroups( DeviceMemory<T>* mem )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx >= mem->fframeW )
        return;

    int groupSize_2 = ( mem->groupSize >> 1 );
    int frame = idx / mem->frameW;

    if( mem->corr[frame] < mem->haltCorrelation )
    {
        mem->groupActive[idx / mem->groupSize] = true;

        if( idx < groupSize_2 )
            return;
        if( idx >= mem->fframeW - groupSize_2 )
            return;

        mem->groupActive[( idx - groupSize_2 ) / mem->groupSize + mem->nGroups] = true;
    }
}

template<typename T>
void hd_maskInactiveGroups( DeviceMemory<T>& mem )
{
    CudaHelper<bool>::setArray(mem.groupActive, false, mem.nGroups * 2);

    int numBlocks = ( mem.fframeW + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
    hipLaunchKernelGGL(( d_maskInactiveGroups<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, mem.d_mem );
}

template void hd_maskInactiveGroups( DeviceMemory<float>& mem );
template void hd_maskInactiveGroups( DeviceMemory<double>& mem );
20afb540671fa1101b42b16c488221e3cf8eab2b.cu
#include "task_maskinactive.cuh"

template<typename T>
__global__ void d_maskInactiveGroups( DeviceMemory<T>* mem )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx >= mem->fframeW )
        return;

    int groupSize_2 = ( mem->groupSize >> 1 );
    int frame = idx / mem->frameW;

    if( mem->corr[frame] < mem->haltCorrelation )
    {
        mem->groupActive[idx / mem->groupSize] = true;

        if( idx < groupSize_2 )
            return;
        if( idx >= mem->fframeW - groupSize_2 )
            return;

        mem->groupActive[( idx - groupSize_2 ) / mem->groupSize + mem->nGroups] = true;
    }
}

template<typename T>
void hd_maskInactiveGroups( DeviceMemory<T>& mem )
{
    CudaHelper<bool>::setArray(mem.groupActive, false, mem.nGroups * 2);

    int numBlocks = ( mem.fframeW + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
    d_maskInactiveGroups<T><<<numBlocks, THREADS_PER_BLOCK>>>( mem.d_mem );
}

template void hd_maskInactiveGroups( DeviceMemory<float>& mem );
template void hd_maskInactiveGroups( DeviceMemory<double>& mem );
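Editor's note: apart from the hipify banner and the added hip/hip_runtime.h include, the .hip and .cu versions of this file differ only in the kernel launch, where hipify rewrites the CUDA triple-chevron syntax into hipLaunchKernelGGL. A trivial sketch of that mapping, using a hypothetical kernel rather than anything from this record:

// Sketch only: scale_kernel and its sizes are illustrative, not from the dataset.
#include "hip/hip_runtime.h"

__global__ void scale_kernel(float* x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void scale(float* d_x, float a, int n)
{
    const int threads = 256;
    const int blocks  = (n + threads - 1) / threads;

    // CUDA source:   scale_kernel<<<blocks, threads>>>(d_x, a, n);
    // hipify output: grid/block become dim3, shared-memory bytes and stream are made explicit.
    hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, 0, d_x, a, n);
}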
f3e537329fad15bf7d286350ed47bbba257bf5b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// @file source/nbfmm/model/disk.cu /// @brief The implementation of static disk shape generator. /// /// @author Mu Yang <[email protected]> /// #include <nbfmm/model.hpp> #include <cmath> #include <hiprand/hiprand_kernel.h> #include <thrust/device_vector.h> #include <nbfmm/core/kernel_function.hpp> #include <nbfmm/utility.hpp> /// @addtogroup impl_model /// @{ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Generate static disk shape particles /// /// @param[in] num_particle the number of particles. /// @param[in] center_position the center position. /// @param[in] max_radius the radius. /// @param[in] weight the weight. /// @param[out] position_current the current particle positions. /// @param[out] position_previous the previous particle positions. /// @param[out] weight_ptr the particle weights. /// __global__ void generateDiskStaticDevice( const int num_particle, const float2 center_position, const float max_radius, const float weight, float2* position_current, float2* position_previous, float* weight_ptr ) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= num_particle ) { return; } hiprandState_t s; hiprand_init(0, idx, 0, &s); const float radius = (float(idx+1) / num_particle) * max_radius; const float angle_current = 2.0f * M_PI * hiprand_uniform(&s); const float angle_previous = angle_current; position_current[idx] = center_position + radius * make_float2(cosf(angle_current), sinf(angle_current)); position_previous[idx] = center_position + radius * make_float2(cosf(angle_previous), sinf(angle_previous)); weight_ptr[idx] = weight; } /// @} // Generate static disk shape particles void nbfmm::model::generateDiskStatic( const int num_particle, const float2 center_position, const float radius, const float weight, const float tick, float2* gpuptr_position_current, float2* gpuptr_position_previous, float* gpuptr_weight ) { static_cast<void>(tick); assert( num_particle > 0 ); assert( radius > 0 ); assert( weight > 0 ); const int block_dim = kMaxBlockDim; const int grid_dim = ((num_particle-1)/block_dim)+1; hipLaunchKernelGGL(( generateDiskStaticDevice), dim3(grid_dim), dim3(block_dim), 0, 0, num_particle, center_position, radius, weight, gpuptr_position_current, gpuptr_position_previous, gpuptr_weight); }
f3e537329fad15bf7d286350ed47bbba257bf5b8.cu
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// @file source/nbfmm/model/disk.cu /// @brief The implementation of static disk shape generator. /// /// @author Mu Yang <[email protected]> /// #include <nbfmm/model.hpp> #include <cmath> #include <curand_kernel.h> #include <thrust/device_vector.h> #include <nbfmm/core/kernel_function.hpp> #include <nbfmm/utility.hpp> /// @addtogroup impl_model /// @{ //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Generate static disk shape particles /// /// @param[in] num_particle the number of particles. /// @param[in] center_position the center position. /// @param[in] max_radius the radius. /// @param[in] weight the weight. /// @param[out] position_current the current particle positions. /// @param[out] position_previous the previous particle positions. /// @param[out] weight_ptr the particle weights. /// __global__ void generateDiskStaticDevice( const int num_particle, const float2 center_position, const float max_radius, const float weight, float2* position_current, float2* position_previous, float* weight_ptr ) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= num_particle ) { return; } curandState s; curand_init(0, idx, 0, &s); const float radius = (float(idx+1) / num_particle) * max_radius; const float angle_current = 2.0f * M_PI * curand_uniform(&s); const float angle_previous = angle_current; position_current[idx] = center_position + radius * make_float2(cosf(angle_current), sinf(angle_current)); position_previous[idx] = center_position + radius * make_float2(cosf(angle_previous), sinf(angle_previous)); weight_ptr[idx] = weight; } /// @} // Generate static disk shape particles void nbfmm::model::generateDiskStatic( const int num_particle, const float2 center_position, const float radius, const float weight, const float tick, float2* gpuptr_position_current, float2* gpuptr_position_previous, float* gpuptr_weight ) { static_cast<void>(tick); assert( num_particle > 0 ); assert( radius > 0 ); assert( weight > 0 ); const int block_dim = kMaxBlockDim; const int grid_dim = ((num_particle-1)/block_dim)+1; generateDiskStaticDevice<<<grid_dim, block_dim>>>(num_particle, center_position, radius, weight, gpuptr_position_current, gpuptr_position_previous, gpuptr_weight); }
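Editor's note: generateDiskStaticDevice draws one random angle per thread by calling curand_init with a fixed seed and the thread index as the sequence number, which gives each thread an independent cuRAND stream. A stripped-down sketch of that per-thread pattern (the kernel name and output buffer are illustrative, not from nbfmm):

// Sketch only: per-thread cuRAND state, as used by generateDiskStaticDevice above.
#include <curand_kernel.h>

__global__ void fill_uniform(float* out, int n, unsigned long long seed)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) {
        return;
    }
    curandState s;
    curand_init(seed, /*sequence=*/idx, /*offset=*/0, &s);  // independent stream per thread
    out[idx] = curand_uniform(&s);                          // uniform draw in (0, 1]
}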
40c6eed6bd73dfeeee7c2ad9f70f1929c8034412.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zsymmetrize_tiles.cu normal z -> c, Fri Sep 11 18:29:21 2015 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConjf(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConjf(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_csymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB ); dim3 grid( ntile, magma_ceildiv( m, NB ) ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( csymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( csymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda, mstride, nstride ); } } /** @see magmablas_csymmetrize_tiles_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_csymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { magmablas_csymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream ); }
40c6eed6bd73dfeeee7c2ad9f70f1929c8034412.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zsymmetrize_tiles.cu normal z -> c, Fri Sep 11 18:29:21 2015 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConjf(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConjf(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_csymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. 
info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB ); dim3 grid( ntile, magma_ceildiv( m, NB ) ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { csymmetrize_tiles_upper<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride ); } else { csymmetrize_tiles_lower<<< grid, threads, 0, queue >>>( m, dA, ldda, mstride, nstride ); } } /** @see magmablas_csymmetrize_tiles_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_csymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { magmablas_csymmetrize_tiles_q( uplo, m, dA, ldda, ntile, mstride, nstride, magma_stream ); }
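Editor's note: following the argument documentation above, symmetrizing all full nb-by-nb diagonal blocks of an n-by-n matrix means calling the routine with m = nb, ntile = n/nb and mstride = nstride = nb. A hedged usage sketch, assuming the public MAGMA 1.x header and a caller-provided device matrix (the wrapper name is hypothetical):

// Sketch only: symmetrize the full nb x nb diagonal tiles of an n x n device
// matrix whose lower triangle is valid. Not code from the MAGMA sources.
#include "magma.h"  // public MAGMA header (assumed for this sketch)

extern "C" void symmetrize_diag_tiles( magma_int_t n, magma_int_t nb,
                                       magmaFloatComplex_ptr dA, magma_int_t ldda )
{
    magma_int_t ntile = n / nb;             // number of full diagonal tiles
    magmablas_csymmetrize_tiles( MagmaLower,  // lower triangle holds the data
                                 nb,          // each tile is nb x nb
                                 dA, ldda,
                                 ntile,
                                 nb, nb );    // next tile is nb rows down, nb columns right
}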
1b59bc2f4d4eef489c7b71da08ee141ebaf02613.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdbool.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #define V 9 #define E 14 long int* get_graph_dim(char* filename) { FILE* file; file = fopen(filename, "r"); if (file == NULL) { printf("Unable to read the CSR file: %s.", filename); exit(1); } long int* dim = (long int*)malloc(2*sizeof(long int)); fscanf(file, "%ld %ld", &dim[0], &dim[1]); return dim; } long int* read_csr(char* filename, long int v, long int e) { FILE* file; file = fopen(filename, "r"); if (file == NULL) { printf("Unable to read the CSR file: %s.", filename); exit(1); } long int* csr = (long int*)malloc((v+1+(2*e))*sizeof(long int)); fscanf(file, "%ld %ld", &v, &e); long int i; for(i=0; i<v+1; i++) fscanf(file, "%ld", &csr[i]); for(; i<v+1+2*e; i++) fscanf(file, "%ld", &csr[i]); return csr; } __global__ void between_centre(double* bc, long int* R, long int* C, long int *vert, long int *edge) { //long int V = *vert; //long int E = *edge; long int idx = threadIdx.x; long int s = blockIdx.x; __shared__ long int d[V], sigma[V]; double dep[V]; __shared__ long int P[V][V]; __shared__ long int p_top[V]; long int k, r; //Initialize d and sigma // for(k=idx; k<V; k+=blockDim.x) // { // if(k == s) // { // d[k] = 0; // sigma[k] = 1; // } // else // { // d[k] = INT_MAX; // sigma[k] = 0; // } // // p_top[k] = 0; // // } // // __shared__ long int Q[V]; // __shared__ long int Q2[V]; // __shared__ long int Q_len; // __shared__ long int Q2_len; // // __shared__ long int S[V]; // __shared__ long int s_top; // // if(idx == 0) // { // Q[0] = s; // Q_len = 1; // Q2_len = 0; // s_top = 0; // } // __syncthreads(); // // while(1) // { // // for(k=idx; k<Q_len; k+=blockDim.x) // { // int v = Q[k]; // // atomicAdd((int*)&s_top, 1); // S[s_top] = v; // // for(r=R[v]; r<R[v+1]; r++) // { // long int w = C[r]; // // if(atomicCAS((int*)&d[w],INT_MAX,(int)d[v]+1) == INT_MAX) // { // int t = atomicAdd((int*)&Q2_len,1); // Q2[t] = w; // } // // if(d[w] == (d[v]+1)) // { // atomicAdd((int*)&sigma[w],sigma[v]); // atomicAdd((int*)&p_top[w], 1); // atomicAdd((int*)&P[w][p_top[w]-1], v); // } // } // } // __syncthreads(); // // if(Q2_len == 0) // break; // // else // { // for(k=idx; k<Q2_len; k+=blockDim.x) // Q[k] = Q2[k]; // // __syncthreads(); // // if(idx == 0) // { // Q_len = Q2_len; // Q2_len = 0; // } // __syncthreads(); // } // } // // while(s_top!=0) // { // atomicAdd((int*)&s_top, -1); // long int w = S[s_top]; // // for(k = 0; k < P[w][p_top[w]-1]; k++) // dep[k] += (double)(sigma[k] * (1 + dep[w]) / sigma[w]); // // if(w!=s) // atomicAdd((float*)&bc[w], (float)dep[w]); // // __syncthreads(); // } } int main() { long int* dim; dim = get_graph_dim("01.txt"); printf("Hello!\n"); long int v = dim[0], e = dim[1]; long int* csr; csr = read_csr("01.txt", v, e); printf("Holla!!\n"); long int *r, *c; double *bc; r = (long int*)malloc((v+1)*sizeof(long int)); c = (long int*)malloc(2*e*sizeof(long int)); bc = (double*)malloc(v*sizeof(double)); memcpy(r, csr, (v+1)*sizeof(long int)); memcpy(c, csr+v+1, 2*e*sizeof(long int)); free(csr); long int *d_v, *d_e; long int *d_r, *d_c; double *d_bc; printf("Sui!\n"); hipMalloc((void**)&d_v, sizeof(long int)); hipMalloc((void**)&d_e, sizeof(long int)); hipMalloc((void**)&d_bc, v * sizeof(double)); hipMalloc((void**)&d_r, (v+1) * sizeof(long int)); hipMalloc((void**)&d_c, 2*e * sizeof(long int)); hipMemcpy(d_v, &v, sizeof(long int), hipMemcpyHostToDevice); hipMemcpy(d_e, &e, sizeof(long int), 
hipMemcpyHostToDevice); hipMemcpy(d_r, r, (v+1)*sizeof(long int), hipMemcpyHostToDevice); hipMemcpy(d_c, c, 2*e*sizeof(long int), hipMemcpyHostToDevice); printf("Namaskara!\n"); dim3 dimGrid(v); dim3 dimBlock(1024); hipEvent_t start, stop; float timer; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( between_centre), dim3(dimGrid), dim3(dimBlock), 0, 0, d_bc, d_r, d_c, d_v, d_e); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&timer, start, stop); hipEventDestroy(start); hipEventDestroy(stop); hipMemcpy(bc, d_bc, v*sizeof(double), hipMemcpyDeviceToHost); long int k; for(k = 0; k < v; k++) printf("%.2f ", bc[k]); printf("\nElapsed Time: %lf\n", timer); hipFree(d_v); hipFree(d_e); hipFree(d_bc); hipFree(d_r); hipFree(d_c); free(r); free(c); }
1b59bc2f4d4eef489c7b71da08ee141ebaf02613.cu
#include <stdio.h> #include <string.h> #include <stdbool.h> #include <cuda.h> #include <cuda_runtime_api.h> #define V 9 #define E 14 long int* get_graph_dim(char* filename) { FILE* file; file = fopen(filename, "r"); if (file == NULL) { printf("Unable to read the CSR file: %s.", filename); exit(1); } long int* dim = (long int*)malloc(2*sizeof(long int)); fscanf(file, "%ld %ld", &dim[0], &dim[1]); return dim; } long int* read_csr(char* filename, long int v, long int e) { FILE* file; file = fopen(filename, "r"); if (file == NULL) { printf("Unable to read the CSR file: %s.", filename); exit(1); } long int* csr = (long int*)malloc((v+1+(2*e))*sizeof(long int)); fscanf(file, "%ld %ld", &v, &e); long int i; for(i=0; i<v+1; i++) fscanf(file, "%ld", &csr[i]); for(; i<v+1+2*e; i++) fscanf(file, "%ld", &csr[i]); return csr; } __global__ void between_centre(double* bc, long int* R, long int* C, long int *vert, long int *edge) { //long int V = *vert; //long int E = *edge; long int idx = threadIdx.x; long int s = blockIdx.x; __shared__ long int d[V], sigma[V]; double dep[V]; __shared__ long int P[V][V]; __shared__ long int p_top[V]; long int k, r; //Initialize d and sigma // for(k=idx; k<V; k+=blockDim.x) // { // if(k == s) // { // d[k] = 0; // sigma[k] = 1; // } // else // { // d[k] = INT_MAX; // sigma[k] = 0; // } // // p_top[k] = 0; // // } // // __shared__ long int Q[V]; // __shared__ long int Q2[V]; // __shared__ long int Q_len; // __shared__ long int Q2_len; // // __shared__ long int S[V]; // __shared__ long int s_top; // // if(idx == 0) // { // Q[0] = s; // Q_len = 1; // Q2_len = 0; // s_top = 0; // } // __syncthreads(); // // while(1) // { // // for(k=idx; k<Q_len; k+=blockDim.x) // { // int v = Q[k]; // // atomicAdd((int*)&s_top, 1); // S[s_top] = v; // // for(r=R[v]; r<R[v+1]; r++) // { // long int w = C[r]; // // if(atomicCAS((int*)&d[w],INT_MAX,(int)d[v]+1) == INT_MAX) // { // int t = atomicAdd((int*)&Q2_len,1); // Q2[t] = w; // } // // if(d[w] == (d[v]+1)) // { // atomicAdd((int*)&sigma[w],sigma[v]); // atomicAdd((int*)&p_top[w], 1); // atomicAdd((int*)&P[w][p_top[w]-1], v); // } // } // } // __syncthreads(); // // if(Q2_len == 0) // break; // // else // { // for(k=idx; k<Q2_len; k+=blockDim.x) // Q[k] = Q2[k]; // // __syncthreads(); // // if(idx == 0) // { // Q_len = Q2_len; // Q2_len = 0; // } // __syncthreads(); // } // } // // while(s_top!=0) // { // atomicAdd((int*)&s_top, -1); // long int w = S[s_top]; // // for(k = 0; k < P[w][p_top[w]-1]; k++) // dep[k] += (double)(sigma[k] * (1 + dep[w]) / sigma[w]); // // if(w!=s) // atomicAdd((float*)&bc[w], (float)dep[w]); // // __syncthreads(); // } } int main() { long int* dim; dim = get_graph_dim("01.txt"); printf("Hello!\n"); long int v = dim[0], e = dim[1]; long int* csr; csr = read_csr("01.txt", v, e); printf("Holla!!\n"); long int *r, *c; double *bc; r = (long int*)malloc((v+1)*sizeof(long int)); c = (long int*)malloc(2*e*sizeof(long int)); bc = (double*)malloc(v*sizeof(double)); memcpy(r, csr, (v+1)*sizeof(long int)); memcpy(c, csr+v+1, 2*e*sizeof(long int)); free(csr); long int *d_v, *d_e; long int *d_r, *d_c; double *d_bc; printf("Sui!\n"); cudaMalloc((void**)&d_v, sizeof(long int)); cudaMalloc((void**)&d_e, sizeof(long int)); cudaMalloc((void**)&d_bc, v * sizeof(double)); cudaMalloc((void**)&d_r, (v+1) * sizeof(long int)); cudaMalloc((void**)&d_c, 2*e * sizeof(long int)); cudaMemcpy(d_v, &v, sizeof(long int), cudaMemcpyHostToDevice); cudaMemcpy(d_e, &e, sizeof(long int), cudaMemcpyHostToDevice); cudaMemcpy(d_r, r, (v+1)*sizeof(long 
int), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, 2*e*sizeof(long int), cudaMemcpyHostToDevice); printf("Namaskara!\n"); dim3 dimGrid(v); dim3 dimBlock(1024); cudaEvent_t start, stop; float timer; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); between_centre<<<dimGrid, dimBlock>>>(d_bc, d_r, d_c, d_v, d_e); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&timer, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); cudaMemcpy(bc, d_bc, v*sizeof(double), cudaMemcpyDeviceToHost); long int k; for(k = 0; k < v; k++) printf("%.2f ", bc[k]); printf("\nElapsed Time: %lf\n", timer); cudaFree(d_v); cudaFree(d_e); cudaFree(d_bc); cudaFree(d_r); cudaFree(d_c); free(r); free(c); }
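Editor's note: read_csr lays the graph out as a row-offset array R of v+1 entries followed by a column array C of 2e entries (both directions of every undirected edge), and the commented-out kernel walks the neighbours of a vertex u as C[R[u]] .. C[R[u+1]-1]. A small host-side sketch of that traversal (the function name is illustrative, not part of the original program):

// Sketch only: iterate every vertex's neighbours in the CSR layout used above.
#include <stdio.h>

void print_neighbours(const long int* R, const long int* C, long int v)
{
    for (long int u = 0; u < v; u++) {
        printf("%ld:", u);
        for (long int r = R[u]; r < R[u + 1]; r++)   // edges incident to u
            printf(" %ld", C[r]);
        printf("\n");
    }
}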
9d18caa9ebe89800da76aedc725f8fcdbba326c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> #include "common/Error.h" #include "common/Matrix.h" #include "common/GpuTimer.h" #include "common/CpuTimer.h" #define N 4096 #define K 16 void compareResults(Matrix<int> h_a, Matrix<int> h_b){ int i,j; for(i=0; i < h_a.width; i++){ for(j = 0; j < h_a.height; j++){ assert(h_a.elements[ j * h_a.width + i ] == h_b.elements[ i * h_b.width + j ] ); } } } __global__ void transposedMatrixKernelFinal(Matrix<int> d_a, Matrix<int> d_b){ // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * blockDim.x, in_corner_j = blockIdx.y * blockDim.y; int out_corner_i = blockIdx.y * blockDim.y, out_corner_j = blockIdx.x * blockDim.x; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[K][K]; while(in_corner_j + x < N){ in_corner_i = blockIdx.x * blockDim.x; out_corner_j = blockIdx.x * blockDim.x; while( in_corner_i + y < N) { tile[y][x] = d_a.elements[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); d_b.elements[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; in_corner_i += blockDim.x * gridDim.x; out_corner_j += blockDim.x * gridDim.x; } in_corner_j += gridDim.y * blockDim.y; out_corner_i += gridDim.y * blockDim.y; } } void onDevice( Matrix<int> h_a, Matrix<int> h_b ){ // declare GPU data Matrix<int> d_a, d_b; d_a.width = h_a.width; d_a.height = h_a.height; d_b.width = h_b.width; d_b.height = h_b.height; const int ARRAY_BYTES = d_a.width * d_a.height * sizeof(int); GpuTimer timer; timer.Start(); // allocate memory on the GPU HANDLER_ERROR_ERR(hipMalloc((void**)&d_a.elements,ARRAY_BYTES)); HANDLER_ERROR_ERR(hipMalloc((void**)&d_b.elements,ARRAY_BYTES)); // copy data from CPU the GPU HANDLER_ERROR_ERR(hipMemcpy(d_a.elements, h_a.elements, ARRAY_BYTES, hipMemcpyHostToDevice)); HANDLER_ERROR_ERR(hipMemcpy(d_b.elements, h_b.elements, ARRAY_BYTES, hipMemcpyHostToDevice)); dim3 GridBlocks( N/K,N/K ); dim3 ThreadsBlocks( K,K ); hipLaunchKernelGGL(( transposedMatrixKernelFinal), dim3(GridBlocks),dim3(ThreadsBlocks), 0, 0, d_a, d_b ); HANDLER_ERROR_MSG("kernel panic!!!"); // copy data back from the GPU to the CPU HANDLER_ERROR_ERR(hipMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES, hipMemcpyDeviceToHost)); timer.Stop(); printf( "Time Device final: %f ms\n", timer.Elapsed() ); compareResults(h_a, h_b); // free GPU memory HANDLER_ERROR_ERR(hipFree(d_a.elements)); HANDLER_ERROR_ERR(hipFree(d_b.elements)); } void onHost(){ Matrix<int> h_a, h_b; h_a.width = N; h_a.height = N; h_b.width = N; h_b.height = N; h_a.elements = (int*)malloc(h_a.width * h_a.height * sizeof(int)); h_b.elements = (int*)malloc(h_b.width * h_b.height * sizeof(int)); int i,j,k=0; for( i = 0; i < h_a.width; i++){ for( j = 0; j < h_a.height; j++){ h_a.elements[j*h_a.width + i] = k; h_b.elements[j*h_b.width + i] = 0; k++; } } // call device configuration onDevice(h_a,h_b); printf("-: successful execution :-\n"); } int main(){ onHost(); }
9d18caa9ebe89800da76aedc725f8fcdbba326c6.cu
#include <stdio.h> #include <assert.h> #include "common/Error.h" #include "common/Matrix.h" #include "common/GpuTimer.h" #include "common/CpuTimer.h" #define N 4096 #define K 16 void compareResults(Matrix<int> h_a, Matrix<int> h_b){ int i,j; for(i=0; i < h_a.width; i++){ for(j = 0; j < h_a.height; j++){ assert(h_a.elements[ j * h_a.width + i ] == h_b.elements[ i * h_b.width + j ] ); } } } __global__ void transposedMatrixKernelFinal(Matrix<int> d_a, Matrix<int> d_b){ // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * blockDim.x, in_corner_j = blockIdx.y * blockDim.y; int out_corner_i = blockIdx.y * blockDim.y, out_corner_j = blockIdx.x * blockDim.x; int x = threadIdx.x, y = threadIdx.y; __shared__ float tile[K][K]; while(in_corner_j + x < N){ in_corner_i = blockIdx.x * blockDim.x; out_corner_j = blockIdx.x * blockDim.x; while( in_corner_i + y < N) { tile[y][x] = d_a.elements[(in_corner_i + x) + (in_corner_j + y)*N]; __syncthreads(); d_b.elements[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y]; in_corner_i += blockDim.x * gridDim.x; out_corner_j += blockDim.x * gridDim.x; } in_corner_j += gridDim.y * blockDim.y; out_corner_i += gridDim.y * blockDim.y; } } void onDevice( Matrix<int> h_a, Matrix<int> h_b ){ // declare GPU data Matrix<int> d_a, d_b; d_a.width = h_a.width; d_a.height = h_a.height; d_b.width = h_b.width; d_b.height = h_b.height; const int ARRAY_BYTES = d_a.width * d_a.height * sizeof(int); GpuTimer timer; timer.Start(); // allocate memory on the GPU HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a.elements,ARRAY_BYTES)); HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b.elements,ARRAY_BYTES)); // copy data from CPU the GPU HANDLER_ERROR_ERR(cudaMemcpy(d_a.elements, h_a.elements, ARRAY_BYTES, cudaMemcpyHostToDevice)); HANDLER_ERROR_ERR(cudaMemcpy(d_b.elements, h_b.elements, ARRAY_BYTES, cudaMemcpyHostToDevice)); dim3 GridBlocks( N/K,N/K ); dim3 ThreadsBlocks( K,K ); transposedMatrixKernelFinal<<<GridBlocks,ThreadsBlocks>>>( d_a, d_b ); HANDLER_ERROR_MSG("kernel panic!!!"); // copy data back from the GPU to the CPU HANDLER_ERROR_ERR(cudaMemcpy(h_b.elements, d_b.elements, ARRAY_BYTES, cudaMemcpyDeviceToHost)); timer.Stop(); printf( "Time Device final: %f ms\n", timer.Elapsed() ); compareResults(h_a, h_b); // free GPU memory HANDLER_ERROR_ERR(cudaFree(d_a.elements)); HANDLER_ERROR_ERR(cudaFree(d_b.elements)); } void onHost(){ Matrix<int> h_a, h_b; h_a.width = N; h_a.height = N; h_b.width = N; h_b.height = N; h_a.elements = (int*)malloc(h_a.width * h_a.height * sizeof(int)); h_b.elements = (int*)malloc(h_b.width * h_b.height * sizeof(int)); int i,j,k=0; for( i = 0; i < h_a.width; i++){ for( j = 0; j < h_a.height; j++){ h_a.elements[j*h_a.width + i] = k; h_b.elements[j*h_b.width + i] = 0; k++; } } // call device configuration onDevice(h_a,h_b); printf("-: successful execution :-\n"); } int main(){ onHost(); }
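Editor's note: the transpose above stages a K-by-K tile in shared memory and reads it back with swapped indices; with K = 16 and 4-byte elements, that transposed read causes shared-memory bank conflicts. A common variant, not present in the original file, pads the tile by one column. Sketch under that assumption, launched with dim3(K, K) threads and ceil(n/K) blocks in each dimension:

// Sketch only: padded shared tile to avoid bank conflicts on the transposed read.
// This is a well-known refinement, not code from the dataset.
#define K 16

__global__ void transpose_tile_padded(const int* in, int* out, int n)
{
    __shared__ int tile[K][K + 1];                  // +1 column breaks the conflicts

    int x = blockIdx.x * K + threadIdx.x;
    int y = blockIdx.y * K + threadIdx.y;
    if (x < n && y < n)
        tile[threadIdx.y][threadIdx.x] = in[y * n + x];
    __syncthreads();

    int tx = blockIdx.y * K + threadIdx.x;          // transposed block origin
    int ty = blockIdx.x * K + threadIdx.y;
    if (tx < n && ty < n)
        out[ty * n + tx] = tile[threadIdx.x][threadIdx.y];
}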
1fe1d7e9354bcd45cc7684d6d93c417e6ba6e484.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #define CHECK(call){ \ const hipError_t error = call; \ if (error != hipSuccess){ \ printf("Error: %s: %d, ",__FILE__,__LINE__);\ printf("code:%d, reason: %s\n",error,hipGetErrorString(error));\ exit(-10*error);\ }\ } double seconds(){ struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void initialInt(int *ip, int size){ for (int i=0; i<size; i++){ ip[i] = i; } } void printMatrix(int *C,const int nx,const int ny){ int *ic = C; printf("\n Matrix: (%d.%d)\n",nx,ny); for (int iy=0;iy<ny;iy++){ for (int ix=0;ix<nx;ix++){ printf("%3d",ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } __global__ void printThreadIndex(int *A,const int nx,const int ny){ int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy*nx + ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) " "global index %2d ival %2d \n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y, ix,iy,idx,A[idx]); } int main(int argc,char **argv){ printf("%s Starting...\n",argv[0]); // get device information int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp,dev)); printf("Using dev %d: %s\n",dev,deviceProp.name); CHECK(hipSetDevice(dev)); // set mat dim int nx = 8; int ny = 6; int nxy = nx * ny; int nBytes = nxy * sizeof(float); // malloc host mem int *h_A; h_A = (int *)malloc(nBytes); // initialize host mat with integer initialInt(h_A,nxy); printMatrix(h_A,nx,ny); // malloc device mem int *d_MatA; hipMalloc((void **) &d_MatA,nBytes); // transfer data from host to dev hipMemcpy(d_MatA,h_A,nBytes,hipMemcpyHostToDevice); // set up execution config dim3 block(4,2); dim3 grid((nx+block.x-1)/block.x,(ny+block.y-1)/block.y); // invoke the kernel hipLaunchKernelGGL(( printThreadIndex) , dim3(grid),dim3(block) , 0, 0, d_MatA,nx,ny); hipDeviceSynchronize(); // free mem on host & dev hipFree(d_MatA); free(h_A); // reset device hipDeviceReset(); return (0); }
1fe1d7e9354bcd45cc7684d6d93c417e6ba6e484.cu
#include <cuda_runtime.h> #include <stdio.h> #include <sys/time.h> #define CHECK(call){ \ const cudaError_t error = call; \ if (error != cudaSuccess){ \ printf("Error: %s: %d, ",__FILE__,__LINE__);\ printf("code:%d, reason: %s\n",error,cudaGetErrorString(error));\ exit(-10*error);\ }\ } double seconds(){ struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void initialInt(int *ip, int size){ for (int i=0; i<size; i++){ ip[i] = i; } } void printMatrix(int *C,const int nx,const int ny){ int *ic = C; printf("\n Matrix: (%d.%d)\n",nx,ny); for (int iy=0;iy<ny;iy++){ for (int ix=0;ix<nx;ix++){ printf("%3d",ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } __global__ void printThreadIndex(int *A,const int nx,const int ny){ int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy*nx + ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) " "global index %2d ival %2d \n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y, ix,iy,idx,A[idx]); } int main(int argc,char **argv){ printf("%s Starting...\n",argv[0]); // get device information int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp,dev)); printf("Using dev %d: %s\n",dev,deviceProp.name); CHECK(cudaSetDevice(dev)); // set mat dim int nx = 8; int ny = 6; int nxy = nx * ny; int nBytes = nxy * sizeof(float); // malloc host mem int *h_A; h_A = (int *)malloc(nBytes); // initialize host mat with integer initialInt(h_A,nxy); printMatrix(h_A,nx,ny); // malloc device mem int *d_MatA; cudaMalloc((void **) &d_MatA,nBytes); // transfer data from host to dev cudaMemcpy(d_MatA,h_A,nBytes,cudaMemcpyHostToDevice); // set up execution config dim3 block(4,2); dim3 grid((nx+block.x-1)/block.x,(ny+block.y-1)/block.y); // invoke the kernel printThreadIndex <<< grid,block >>>(d_MatA,nx,ny); cudaDeviceSynchronize(); // free mem on host & dev cudaFree(d_MatA); free(h_A); // reset device cudaDeviceReset(); return (0); }
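Editor's note: the CHECK macro above wraps runtime API calls, but a kernel launch itself returns no status; launch errors surface through cudaGetLastError() right after the launch and execution errors through cudaDeviceSynchronize(). A short self-contained sketch reusing the same macro style (the kernel is a placeholder):

// Sketch only: error-checking a kernel launch with the CHECK idiom used above.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK(call){ \
    const cudaError_t error = call; \
    if (error != cudaSuccess){ \
        printf("Error: %s: %d, ",__FILE__,__LINE__);\
        printf("code:%d, reason: %s\n",error,cudaGetErrorString(error));\
        exit(-10*error);\
    }\
}

__global__ void dummy_kernel(void) {}

int main(void){
    dummy_kernel<<<1, 1>>>();
    CHECK(cudaGetLastError());        // launch-time errors (bad configuration, etc.)
    CHECK(cudaDeviceSynchronize());   // errors raised while the kernel ran
    return 0;
}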
5a62b397f06fa520968934e622ef7049a1c8f29a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "mixed_tentusscher_myo_epi_2004_S2_14.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); check_cuda_error(hipFree(mapping_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice)); } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); if(mapping_device) check_cuda_error(hipFree(mapping_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { // Initial conditions for TenTusscher 2004 myocardium if (mapping[threadID] == 0) { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * 
)((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } // Initial conditions for TenTusscher 2004 epicardium else { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5520015958853,0.00129455548553585,0.779309085270448,0.779120507104534,0.000175257612314416,0.484920925268272,0.00294370895749353,0.999998342569330,1.93874711998085e-08,1.89517017546677e-05,0.999771487389649,1.00728300939714,0.999997507479585,4.07467073122400e-05,1.09809284664416,9.20129773009743,140.104443387177}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { if (mapping[sv_id] == 0) { RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char 
*) sv + pitch * i) + sv_id); } } else { RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); for (int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } } inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + 
pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.8459241921870,0.000196064566156457,0.000146553942706429,0.000703301155773898,0.260161768868762,0.181317938505467,0.117372181971868,3.63855954741155,0.0150905585683178,2.46920388142297,1094.85218851589,0.000479578430398116,0.318788483309312,0.0161958786068848,0.00436114068069140,1.30104522530463e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real 
BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
5a62b397f06fa520968934e622ef7049a1c8f29a.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "mixed_tentusscher_myo_epi_2004_S2_14.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); } kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); check_cuda_error(cudaFree(mapping_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } // Get the mapping array uint32_t *mapping = NULL; uint32_t *mapping_device = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); if(mapping_device) check_cuda_error(cudaFree(mapping_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { // Initial conditions for TenTusscher 2004 myocardium if (mapping[threadID] == 0) { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 
7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } // Initial conditions for TenTusscher 2004 epicardium else { // Default initial conditions /* *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5520015958853,0.00129455548553585,0.779309085270448,0.779120507104534,0.000175257612314416,0.484920925268272,0.00294370895749353,0.999998342569330,1.93874711998085e-08,1.89517017546677e-05,0.999771487389649,1.00728300939714,0.999997507479585,4.07467073122400e-05,1.09809284664416,9.20129773009743,140.104443387177}; for (uint32_t i = 0; i < NEQ; i++) *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { if (mapping[sv_id] == 0) { RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); for(int i = 0; i < NEQ; i++) { *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } else { RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); for (int i = 0; i < NEQ; i++) { *((real *) ((char 
*) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); } } } } } inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv_ + 
pitch * 5) + threadID_); real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.8459241921870,0.000196064566156457,0.000146553942706429,0.000703301155773898,0.260161768868762,0.181317938505467,0.117372181971868,3.63855954741155,0.0150905585683178,2.46920388142297,1094.85218851589,0.000479578430398116,0.318788483309312,0.0161958786068848,0.00436114068069140,1.30104522530463e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real 
BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
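// ---------------------------------------------------------------------------
// Editor's sketch (not part of the model code above): both versions of the
// TenTusscher solver allocate the state-variable matrix with
// cudaMallocPitch/hipMallocPitch -- one row per equation, one column per cell --
// and then address element (eq, cell) as *((real*)((char*)sv + pitch*eq) + cell).
// The standalone example below shows only that pitched-indexing pattern, with
// hypothetical names and sizes (NEQ_DEMO, n_cells); unlike the model code, the
// pitch is passed as a kernel argument instead of through cudaMemcpyToSymbol.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <stdio.h>

#define NEQ_DEMO 17   // demo row count only; mirrors the 17 state variables used above

__global__ void init_states(float *sv, size_t pitch, int n_cells) {
    int cell = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell >= n_cells) return;
    for (int eq = 0; eq < NEQ_DEMO; eq++) {
        // each row starts 'pitch' bytes after the previous one
        float *row = (float *)((char *)sv + pitch * eq);
        row[cell] = (float)eq;
    }
}

int main(void) {
    const int n_cells = 1024;
    float *sv = NULL;
    size_t pitch = 0;
    // width = bytes per row (all cells of one equation), height = number of rows
    cudaMallocPitch((void **)&sv, &pitch, n_cells * sizeof(float), NEQ_DEMO);
    init_states<<<(n_cells + 255) / 256, 256>>>(sv, pitch, n_cells);
    cudaDeviceSynchronize();
    cudaFree(sv);
    return 0;
}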
32937ee1880a974b8ec32ae97433aa49314244bd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define LEN 5.f
#define TIME_STEP 0.005f
#define FINAL_TIME 10.f

__device__ float scale(int i, int w) { return 2*LEN*(((1.f*i)/w) - 0.5f); }

__device__ float f(float x, float y, float param, float sys){
    if(sys == 1) return x - 2*param*y;
    if(sys == 2) return -x + param*(1 - x*x)*y;
    else return -x - 2*param*y;
}

__device__ float2 euler(float x, float y, float dt, float tFinal, float param, float sys){
    float dx = 0.f, dy = 0.f;
    for(float t = 0; t < tFinal; t += dt){
        dx = dt*y;
        dy = dt*f(x, y, param, sys);
        x += dx;
        y += dy;
    }
    return make_float2(x, y);
}

__device__ unsigned char clip(float n){ return n > 255 ? 255 : (n < 0 ? 0 : n); }

__global__ void stabImageKernel(uchar4 *d_out, int w, int h, float p, int s){
    const int c = blockIdx.x*blockDim.x + threadIdx.x;
    const int r = blockIdx.y*blockDim.y + threadIdx.y;
    if((c >= w) || (r >= h)) return;
    const int i = c + r*w;
    const float x0 = scale(c, w);
    const float y0 = scale(r, h);
    const float dist_0 = sqrt(x0*x0 + y0*y0);
    const float2 pos = euler(x0, y0, TIME_STEP, FINAL_TIME, p, s);
    const float dist_f = sqrt(pos.x*pos.x + pos.y*pos.y);
    const float dist_r = dist_f/dist_0;
    d_out[i].x = clip(dist_r*255);
    d_out[i].y = ((c == w/2) || (r == h/2)) ? 255 : 0;
    d_out[i].z = clip((1/dist_r)*255);
    d_out[i].w = 255;
}

void kernelLauncher(uchar4 *d_out, int w, int h, float p, int s){
    const dim3 blockSize(TX, TY);
    const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
    hipLaunchKernelGGL(( stabImageKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, p, s);
}
32937ee1880a974b8ec32ae97433aa49314244bd.cu
#include "kernel.h" #define TX 32 #define TY 32 #define LEN 5.f #define TIME_STEP 0.005f #define FINAL_TIME 10.f __device__ float scale(int i, int w) { return 2*LEN*(((1.f*i)/w) - 0.5f); } __device__ float f(float x, float y, float param, float sys){ if(sys == 1) return x - 2*param*y; if(sys == 2) return -x + param*(1 - x*x)*y; else return -x - 2*param*y; } __device__ float2 euler(float x, float y, float dt, float tFinal, float param, float sys){ float dx = 0.f, dy = 0.f; for(float t = 0;t < tFinal;t+=dt){ dx = dt*y; dy = dt*f(x, y, param, sys); x += dx; y += dy; } return make_float2(x, y); } __device__ unsigned char clip(float n){ return n > 255 ? 255 : (n < 0 ? 0 : n);} __global__ void stabImageKernel(uchar4 *d_out, int w, int h, float p, int s){ const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r = blockIdx.y*blockDim.y + threadIdx.y; if((c>=w) || (r>=h)) return; const int i = c + r*w; const float x0 = scale(c,w); const float y0 = scale(r, h); const float dist_0 = sqrt(x0*x0+y0*y0); const float2 pos = euler(x0,y0,TIME_STEP,FINAL_TIME,p,s); const float dist_f = sqrt(pos.x*pos.x+pos.y*pos.y); const float dist_r = dist_f/dist_0; d_out[i].x = clip(dist_r*255); d_out[i].y = ((c == w/2) || (r == h/2)) ? 255 : 0; d_out[i].z = clip((1/dist_r)*255); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, float p, int s){ const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX -1)/TX, (h + TY - 1)/TY); stabImageKernel<<<gridSize, blockSize>>>(d_out, w, h, p, s); }
6c117048ee248949e221a57855530ac20c12732f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#define N 100

__global__ void K(int *a, int start, int end) {
    printf("start = %d, end = %d\n", start, end);
}

int main() {
    int a[N];
    int ii;

    omp_set_num_threads(5);

    #pragma omp parallel
    {
        #pragma omp parallel for
        for (ii = 0; ii < N; ++ii) {
            a[ii] = ii;
        }

        int nthreads = omp_get_num_threads();
        int perthread = N / nthreads;
        int start = perthread * omp_get_thread_num();
        int end = start + perthread;
        hipLaunchKernelGGL(( K), dim3(1), dim3(1), 0, 0, a, start, end);
        hipDeviceSynchronize();
    }

    printf("All over.\n");
    return 0;
}
6c117048ee248949e221a57855530ac20c12732f.cu
#include <stdio.h>
#include <omp.h>
#include <cuda.h>
#define N 100

__global__ void K(int *a, int start, int end) {
    printf("start = %d, end = %d\n", start, end);
}

int main() {
    int a[N];
    int ii;

    omp_set_num_threads(5);

    #pragma omp parallel
    {
        #pragma omp parallel for
        for (ii = 0; ii < N; ++ii) {
            a[ii] = ii;
        }

        int nthreads = omp_get_num_threads();
        int perthread = N / nthreads;
        int start = perthread * omp_get_thread_num();
        int end = start + perthread;
        K<<<1, 1>>>(a, start, end);
        cudaDeviceSynchronize();
    }

    printf("All over.\n");
    return 0;
}
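// ---------------------------------------------------------------------------
// Editor's sketch (not part of the file pair above): in the example, the host
// array 'a' is passed straight to K, which only works because the kernel never
// dereferences it. If the kernel actually had to read its slice of 'a', one
// option is to copy the array to device memory once, outside the parallel
// region, and hand every OpenMP thread the device pointer plus its range.
// The name d_a and the modified kernel body are hypothetical.
// ---------------------------------------------------------------------------
#include <stdio.h>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 100

__global__ void K(const int *a, int start, int end) {
    // safe now: 'a' points to device memory
    printf("start = %d, end = %d, a[start] = %d\n", start, end, a[start]);
}

int main() {
    int a[N];
    for (int ii = 0; ii < N; ++ii) a[ii] = ii;

    int *d_a = NULL;
    cudaMalloc((void **)&d_a, N * sizeof(int));
    cudaMemcpy(d_a, a, N * sizeof(int), cudaMemcpyHostToDevice);

    omp_set_num_threads(5);
    #pragma omp parallel
    {
        int nthreads = omp_get_num_threads();
        int perthread = N / nthreads;
        int start = perthread * omp_get_thread_num();
        int end = start + perthread;
        K<<<1, 1>>>(d_a, start, end);
        cudaDeviceSynchronize();
    }

    cudaFree(d_a);
    printf("All over.\n");
    return 0;
}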
8de65ab88cbe95560b25cdd5977f4b3f4fe03b14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: Apache-2.0 // DeepSpeed Team #include "custom_cuda_layers.h" const int unroll_factor = 4; __global__ void dropout_kernel(const int N, const float ratio, float* out, const float* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seed.first, idx, seed.second, &state); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float4 rand = hiprand_uniform4(&state); uint8_t m[unroll_factor]; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); int i = j * unroll_factor; mask[i] = (uint8_t)m[0]; mask[i + 1] = (uint8_t)m[1]; mask[i + 2] = (uint8_t)m[2]; mask[i + 3] = (uint8_t)m[3]; out[i] = Xdata[i] * scale * m[0]; out[i + 1] = Xdata[i + 1] * scale * m[1]; out[i + 2] = Xdata[i + 2] * scale * m[2]; out[i + 3] = Xdata[i + 3] * scale * m[3]; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = hiprand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { uint8_t m = (uint8_t)(rand_data[k++] > ratio); out[i] = Xdata[i] * scale * m; mask[i] = m; } } } __global__ void dropout_kernel(const int N, const float ratio, __half* out, const __half* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seed.first, idx, seed.second, &state); #ifdef __STOCHASTIC_MODE__ const __half2 h_scale = __float2half2_rn(scale); const float2* x_cast = reinterpret_cast<const float2*>(Xdata); float2* out_cast = reinterpret_cast<float2*>(out); uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask); uint32_t m_32; uint8_t* m = reinterpret_cast<uint8_t*>(&m_32); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); __half2 mask_h[2]; float2 mask_f[2]; CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_f = x_cast[j]; __half2* x_h = reinterpret_cast<__half2*>(&x_f); float4 rand = hiprand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); float* mask_f_data = &mask_f[0].x; #pragma unroll for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); mask_h[0] = __float22half2_rn(mask_f[0]); mask_h[1] = __float22half2_rn(mask_f[1]); result_h[0] = x_h[0] * h_scale * mask_h[0]; result_h[1] = x_h[1] * h_scale * mask_h[1]; out_cast[j] = result_f; mask_cast[j] = m_32; } #else CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { int i = j * unroll_factor; const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i); float2 vals_half_f[2]; vals_half_f[0] = __half22float2(vals_half[0]); vals_half_f[1] = __half22float2(vals_half[1]); uint8_t m[unroll_factor]; float4 rand = hiprand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); out[i] = __float2half(vals_half_f[0].x * scale * m[0]); out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); 
mask[i] = m[0]; mask[i + 1] = m[1]; mask[i + 2] = m[2]; mask[i + 3] = m[3]; } #endif int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = hiprand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { uint8_t m = (uint8_t)(rand_data[k++] > ratio); out[i] = __float2half((float)Xdata[i] * scale * m); mask[i] = m; } } } __global__ void dropout_kernel_bwd(const int N, const float ratio, const float* Xdata, float* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { int i = j * unroll_factor; out[i] = mask[i] ? Xdata[i] * scale : 0.0; out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } } } __global__ void dropout_kernel_bwd(const int N, const float ratio, const __half* Xdata, __half* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); #ifdef __STOCHASTIC_MODE__ const __half2 h_scale = __float2half2_rn(scale); const float2* x_cast = reinterpret_cast<const float2*>(Xdata); float2* out_cast = reinterpret_cast<float2*>(out); uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_f = x_cast[j]; __half2* x_h = reinterpret_cast<__half2*>(&x_f); uint32_t m_32 = mask_cast[j]; uint8_t* m = (uint8_t*)&m_32; __half2 mask_h[2]; float2 mask_f[2]; float* mask_f_data = &mask_f[0].x; #pragma unroll for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); #pragma unroll for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); result_h[0] = x_h[0] * h_scale * mask_h[0]; result_h[1] = x_h[1] * h_scale * mask_h[1]; out_cast[j] = result_f; } #else const __half h_scale = __float2half(scale); const __half h_zero = __float2half(0.0); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { int i = j * unroll_factor; const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i); uint8_t* m = mask + i; float2 vals_half_f[2]; vals_half_f[0] = __half22float2(vals_half[0]); vals_half_f[1] = __half22float2(vals_half[1]); out[i] = __float2half(vals_half_f[0].x * scale * m[0]); out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); } #endif int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { out[i] = __float2half((float)Xdata[i] * scale * mask[i]); } } } template <typename T> void launch_dropout(T* out, const T* vals, uint8_t* mask, int total_count, int dim, float ratio, hipStream_t stream, bool bwd) { assert(unroll_factor == 4); dim3 grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); dim3 block_dim = DS_CUDA_NUM_THREADS; if (dim > 512) { block_dim.x >>= 1; grid_dim.x <<= 1; } uint64_t inc = total_count / grid_dim.x / block_dim.x; std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc); if (bwd) hipLaunchKernelGGL(( 
dropout_kernel_bwd), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, ratio, vals, out, mask, seed); else hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, ratio, out, vals, mask, seed); } template void launch_dropout(float* out, const float* vals, uint8_t* mask, int total_count, int dim, float ratio, hipStream_t stream, bool); template void launch_dropout(__half* out, const __half* vals, uint8_t* mask, int total_count, int dim, float ratio, hipStream_t stream, bool); __global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) { CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } } __global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) { const __half2 h_scale = __float2half2_rn(scale); float2* x_cast = reinterpret_cast<float2*>(Xdata); uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_data = x_cast[j]; uint32_t m_32 = mask_cast[j]; uint8_t* m = (uint8_t*)&m_32; float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); #ifdef __STOCHASTIC_MODE__ __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); __half2 mask_h[2]; float2 mask_f[2]; float* mask_f_data = &mask_f[0].x; #pragma unroll for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); mask_h[0] = __float22half2_rn(mask_f[0]); mask_h[1] = __float22half2_rn(mask_f[1]); result_h[0] = x_data_h[0] * h_scale * mask_h[0]; result_h[1] = x_data_h[1] * h_scale * mask_h[1]; #else __half* x_data_h = reinterpret_cast<__half*>(&x_data); float2 result[2]; result[0].x = (float)x_data_h[0] * scale * m[0]; result[0].y = (float)x_data_h[1] * scale * m[1]; result[1].x = (float)x_data_h[2] * scale * m[2]; result[1].y = (float)x_data_h[3] * scale * m[3]; result_h[0] = __float22half2_rn(result[0]); result_h[1] = __float22half2_rn(result[1]); #endif x_cast[j] = result_f; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); } } } template <typename T> void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream) { assert(unroll_factor == 4); const float scale = 1. / (1. 
- ratio); hipLaunchKernelGGL(( dropout_grad_kernel), dim3(DS_GET_BLOCKS(total_count / unroll_factor)), dim3(DS_CUDA_NUM_THREADS), 0, stream, total_count, scale, vals, mask); } template void launch_dropout_grad(float* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream); template void launch_dropout_grad(__half* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream); __global__ void dropout_grad_kernel(const int N, const float scale, const float* Xdata, float* out, uint8_t* mask) { CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } } __global__ void dropout_grad_kernel(const int N, const float scale, const __half* Xdata, __half* out, uint8_t* mask) { const float2* x_cast = reinterpret_cast<const float2*>(Xdata); float2* out_cast = reinterpret_cast<float2*>(out); const uint32_t* mask_cast = reinterpret_cast<const uint32_t*>(mask); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_data = x_cast[j]; uint32_t m_32 = mask_cast[j]; uint8_t* m = (uint8_t*)&m_32; __half* x_data_h = reinterpret_cast<__half*>(&x_data); float2 result[2]; result[0].x = (float)x_data_h[0] * scale * m[0]; result[0].y = (float)x_data_h[1] * scale * m[1]; result[1].x = (float)x_data_h[2] * scale * m[2]; result[1].y = (float)x_data_h[3] * scale * m[3]; result_h[0] = __float22half2_rn(result[0]); result_h[1] = __float22half2_rn(result[1]); out_cast[j] = result_f; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { out[i] = __float2half((float)Xdata[i] * scale * mask[i]); } } } template <typename T> void launch_dropout_grad(T* vals_out, const T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream) { assert(unroll_factor == 4); const float scale = 1. / (1. - ratio); hipLaunchKernelGGL(( dropout_grad_kernel), dim3(DS_GET_BLOCKS(total_count / unroll_factor)), dim3(DS_CUDA_NUM_THREADS), 0, stream, total_count, scale, vals, vals_out, mask); } template void launch_dropout_grad(float*, const float* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream); template void launch_dropout_grad(__half*, const __half* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream); __global__ void dropout_kernel(const int N, const int dim, const float ratio, const float* bias, float* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. 
- ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); hiprandStatePhilox4_32_10_t state; hiprand_init(seed.first, idx, seed.second, &state); float4* Xdata_cast = reinterpret_cast<float4*>(Xdata); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float4* bias_cast = reinterpret_cast<const float4*>(bias); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = hiprand_uniform4(&state); uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); float4 x_data = Xdata_cast[j]; float4 b_data = bias_cast[j % (dim / unroll_factor)]; x_data.x += b_data.x; x_data.y += b_data.y; x_data.z += b_data.z; x_data.w += b_data.w; x_data.x = x_data.x * scale * m[0]; x_data.y = x_data.y * scale * m[1]; x_data.z = x_data.z * scale * m[2]; x_data.w = x_data.w * scale * m[3]; mask_32[j] = m_32; Xdata_cast[j] = x_data; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = hiprand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = Xdata[i] + bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); Xdata[i] = x_data * scale * m; mask[i] = m; } } } __global__ void dropout_kernel(const int N, const int dim, const float ratio, const __half* bias, __half* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); hiprandStatePhilox4_32_10_t state; hiprand_init(seed.first, idx, seed.second, &state); float2* Xdata_cast = reinterpret_cast<float2*>(Xdata); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float2* bias_cast = reinterpret_cast<const float2*>(bias); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = hiprand_uniform4(&state); float2 data_f; __half2* data_h = reinterpret_cast<__half2*>(&data_f); float2 bias_f; __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); data_f = Xdata_cast[j]; bias_f = bias_cast[j % (dim / unroll_factor)]; float2 data_h_0 = __half22float2(data_h[0]); float2 data_h_1 = __half22float2(data_h[1]); float2 bias_h_0 = __half22float2(bias_h[0]); float2 bias_h_1 = __half22float2(bias_h[1]); data_h_0.x += bias_h_0.x; data_h_0.y += bias_h_0.y; data_h_1.x += bias_h_1.x; data_h_1.y += bias_h_1.y; uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); data_h_0.x = __float2half(data_h_0.x * scale * m[0]); data_h_0.y = __float2half(data_h_0.y * scale * m[1]); data_h_1.x = __float2half(data_h_1.x * scale * m[2]); data_h_1.y = __float2half(data_h_1.y * scale * m[3]); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); result_h[0] = __float22half2_rn(data_h_0); result_h[1] = __float22half2_rn(data_h_1); Xdata_cast[j] = result_f; mask_32[j] = m_32; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = hiprand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = (float)Xdata[i] + (float)bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); Xdata[i] = __float2half(x_data * scale * m); mask[i] = m; } } } template <typename T> void 
launch_dropout(T* out, const T* bias, uint8_t* mask, int batch, int dim, float ratio, hipStream_t stream) { assert(unroll_factor == 4); int total_count = batch * dim / unroll_factor; dim3 grid_dim = DS_GET_BLOCKS(total_count); dim3 block_dim = DS_CUDA_NUM_THREADS; uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc); hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, dim, ratio, bias, out, mask, seed); } template void launch_dropout(float*, const float* bias, uint8_t* mask, int batch, int dim, float ratio, hipStream_t stream); template void launch_dropout(__half*, const __half* bias, uint8_t* mask, int batch, int dim, float ratio, hipStream_t stream); __global__ void dropout_kernel(const int N, const int dim, const float ratio, const float* input, const float* residual, const float* bias, float* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); hiprandStatePhilox4_32_10_t state; hiprand_init(seed.first, idx, seed.second, &state); float4* out_cast = reinterpret_cast<float4*>(out); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float4* bias_cast = reinterpret_cast<const float4*>(bias); const float4* residual_cast = reinterpret_cast<const float4*>(residual); const float4* input_cast = reinterpret_cast<const float4*>(input); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = hiprand_uniform4(&state); uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); float4 out_data; float4 b_data = bias_cast[j % (dim / unroll_factor)]; float4 res_data = residual_cast[j]; float4 inp_data = input_cast[j]; out_data.x = (b_data.x + inp_data.x); out_data.y = (b_data.y + inp_data.y); out_data.z = (b_data.z + inp_data.z); out_data.w = (b_data.w + inp_data.w); out_data.x = out_data.x * scale * m[0]; out_data.y = out_data.y * scale * m[1]; out_data.z = out_data.z * scale * m[2]; out_data.w = out_data.w * scale * m[3]; out_data.x += res_data.x; out_data.y += res_data.y; out_data.z += res_data.z; out_data.w += res_data.w; mask_32[j] = m_32; out_cast[j] = out_data; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = hiprand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = input[i] + bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); x_data = x_data * scale * m; x_data += residual[i]; out[i] = x_data; mask[i] = m; } } } __global__ void dropout_kernel(const int N, const int dim, const float ratio, const __half* input, const __half* residual, const __half* bias, __half* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. 
- ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); hiprandStatePhilox4_32_10_t state; hiprand_init(seed.first, idx, seed.second, &state); float2* out_cast = reinterpret_cast<float2*>(out); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float2* bias_cast = reinterpret_cast<const float2*>(bias); const float2* residual_cast = reinterpret_cast<const float2*>(residual); const float2* input_cast = reinterpret_cast<const float2*>(input); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = hiprand_uniform4(&state); float2 data_f; __half2* data_h = reinterpret_cast<__half2*>(&data_f); float2 bias_f; __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); float2 residual_f; __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); float2 input_f; __half2* input_h = reinterpret_cast<__half2*>(&input_f); bias_f = bias_cast[j % (dim / unroll_factor)]; residual_f = residual_cast[j]; input_f = input_cast[j]; float2 data_h_0 = __half22float2(data_h[0]); float2 data_h_1 = __half22float2(data_h[1]); float2 bias_h_0 = __half22float2(bias_h[0]); float2 bias_h_1 = __half22float2(bias_h[1]); float2 residual_h_0 = __half22float2(residual_h[0]); float2 residual_h_1 = __half22float2(residual_h[1]); float2 input_h_0 = __half22float2(input_h[0]); float2 input_h_1 = __half22float2(input_h[1]); data_h_0.x = (bias_h_0.x + input_h_0.x); data_h_0.y = (bias_h_0.y + input_h_0.y); data_h_1.x = (bias_h_1.x + input_h_1.x); data_h_1.y = (bias_h_1.y + input_h_1.y); uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); data_h_0.x = __float2half(data_h_0.x * scale * m[0]); data_h_0.y = __float2half(data_h_0.y * scale * m[1]); data_h_1.x = __float2half(data_h_1.x * scale * m[2]); data_h_1.y = __float2half(data_h_1.y * scale * m[3]); data_h_0.x += residual_h_0.x; data_h_0.y += residual_h_0.y; data_h_1.x += residual_h_1.x; data_h_1.y += residual_h_1.y; float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); result_h[0] = __float22half2_rn(data_h_0); result_h[1] = __float22half2_rn(data_h_1); out_cast[j] = result_f; mask_32[j] = m_32; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = hiprand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = (float)input[i] + (float)bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); x_data = x_data * scale * m; x_data += (float)residual[i]; out[i] = __float2half(x_data); mask[i] = m; } } } template <typename T> void launch_dropout(T* out, const T* input, const T* residual, const T* bias, uint8_t* mask, int batch, int dim, float ratio, hipStream_t stream) { assert(unroll_factor == 4); int total_count = batch * dim / unroll_factor; dim3 grid_dim = DS_GET_BLOCKS(total_count); dim3 block_dim = DS_CUDA_NUM_THREADS; uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc); hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, dim, ratio, input, residual, bias, out, mask, seed); } template void launch_dropout(float*, const float*, const float* residual, const float* bias, uint8_t* mask, int batch, int dim, float ratio, hipStream_t stream); template void launch_dropout(__half*, const __half*, const __half* residual, 
const __half* bias, uint8_t* mask, int batch, int dim, float ratio, hipStream_t stream);
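The launchers above take raw device pointers, an element count, and a stream, and are instantiated for float and __half. A minimal host-side sketch of one forward/backward round trip is shown below; the function and buffer names are illustrative assumptions, the declarations are taken to come from custom_cuda_layers.h, and the CUDA spellings are used for brevity (this hipified file takes hipStream_t instead of cudaStream_t).

#include <cstdint>
#include <cuda_runtime_api.h>
#include "custom_cuda_layers.h"  // assumed to declare launch_dropout / launch_dropout_grad

// Sketch: dropout on a [batch, dim] fp32 activation, then in-place rescale of its gradient.
// d_mask must hold batch * dim bytes; the kernels write one mask byte per element.
void dropout_round_trip_sketch(float* d_out, const float* d_act, float* d_grad,
                               uint8_t* d_mask, int batch, int dim,
                               float ratio, cudaStream_t stream)
{
    const int total = batch * dim;
    // Forward path (bwd == false): samples the mask and writes scaled activations.
    launch_dropout<float>(d_out, d_act, d_mask, total, dim, ratio, stream, false);
    // Backward helper: rescales the gradient in place using the saved mask.
    launch_dropout_grad<float>(d_grad, d_mask, total, ratio, stream);
}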
8de65ab88cbe95560b25cdd5977f4b3f4fe03b14.cu
// Copyright (c) Microsoft Corporation. // SPDX-License-Identifier: Apache-2.0 // DeepSpeed Team #include "custom_cuda_layers.h" const int unroll_factor = 4; __global__ void dropout_kernel(const int N, const float ratio, float* out, const float* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seed.first, idx, seed.second, &state); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float4 rand = curand_uniform4(&state); uint8_t m[unroll_factor]; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); int i = j * unroll_factor; mask[i] = (uint8_t)m[0]; mask[i + 1] = (uint8_t)m[1]; mask[i + 2] = (uint8_t)m[2]; mask[i + 3] = (uint8_t)m[3]; out[i] = Xdata[i] * scale * m[0]; out[i + 1] = Xdata[i + 1] * scale * m[1]; out[i + 2] = Xdata[i + 2] * scale * m[2]; out[i + 3] = Xdata[i + 3] * scale * m[3]; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = curand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { uint8_t m = (uint8_t)(rand_data[k++] > ratio); out[i] = Xdata[i] * scale * m; mask[i] = m; } } } __global__ void dropout_kernel(const int N, const float ratio, __half* out, const __half* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seed.first, idx, seed.second, &state); #ifdef __STOCHASTIC_MODE__ const __half2 h_scale = __float2half2_rn(scale); const float2* x_cast = reinterpret_cast<const float2*>(Xdata); float2* out_cast = reinterpret_cast<float2*>(out); uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask); uint32_t m_32; uint8_t* m = reinterpret_cast<uint8_t*>(&m_32); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); __half2 mask_h[2]; float2 mask_f[2]; CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_f = x_cast[j]; __half2* x_h = reinterpret_cast<__half2*>(&x_f); float4 rand = curand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); float* mask_f_data = &mask_f[0].x; #pragma unroll for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); mask_h[0] = __float22half2_rn(mask_f[0]); mask_h[1] = __float22half2_rn(mask_f[1]); result_h[0] = x_h[0] * h_scale * mask_h[0]; result_h[1] = x_h[1] * h_scale * mask_h[1]; out_cast[j] = result_f; mask_cast[j] = m_32; } #else CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { int i = j * unroll_factor; const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i); float2 vals_half_f[2]; vals_half_f[0] = __half22float2(vals_half[0]); vals_half_f[1] = __half22float2(vals_half[1]); uint8_t m[unroll_factor]; float4 rand = curand_uniform4(&state); m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); out[i] = __float2half(vals_half_f[0].x * scale * m[0]); out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); mask[i] = m[0]; mask[i + 1] = m[1]; mask[i + 2] = m[2]; mask[i + 3] = m[3]; } #endif int high_index 
= ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = curand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { uint8_t m = (uint8_t)(rand_data[k++] > ratio); out[i] = __float2half((float)Xdata[i] * scale * m); mask[i] = m; } } } __global__ void dropout_kernel_bwd(const int N, const float ratio, const float* Xdata, float* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { int i = j * unroll_factor; out[i] = mask[i] ? Xdata[i] * scale : 0.0; out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } } } __global__ void dropout_kernel_bwd(const int N, const float ratio, const __half* Xdata, __half* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); #ifdef __STOCHASTIC_MODE__ const __half2 h_scale = __float2half2_rn(scale); const float2* x_cast = reinterpret_cast<const float2*>(Xdata); float2* out_cast = reinterpret_cast<float2*>(out); uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_f = x_cast[j]; __half2* x_h = reinterpret_cast<__half2*>(&x_f); uint32_t m_32 = mask_cast[j]; uint8_t* m = (uint8_t*)&m_32; __half2 mask_h[2]; float2 mask_f[2]; float* mask_f_data = &mask_f[0].x; #pragma unroll for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); #pragma unroll for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); result_h[0] = x_h[0] * h_scale * mask_h[0]; result_h[1] = x_h[1] * h_scale * mask_h[1]; out_cast[j] = result_f; } #else const __half h_scale = __float2half(scale); const __half h_zero = __float2half(0.0); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { int i = j * unroll_factor; const __half2* vals_half = reinterpret_cast<const __half2*>(Xdata + i); uint8_t* m = mask + i; float2 vals_half_f[2]; vals_half_f[0] = __half22float2(vals_half[0]); vals_half_f[1] = __half22float2(vals_half[1]); out[i] = __float2half(vals_half_f[0].x * scale * m[0]); out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); } #endif int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { out[i] = __float2half((float)Xdata[i] * scale * mask[i]); } } } template <typename T> void launch_dropout(T* out, const T* vals, uint8_t* mask, int total_count, int dim, float ratio, cudaStream_t stream, bool bwd) { assert(unroll_factor == 4); dim3 grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); dim3 block_dim = DS_CUDA_NUM_THREADS; if (dim > 512) { block_dim.x >>= 1; grid_dim.x <<= 1; } uint64_t inc = total_count / grid_dim.x / block_dim.x; std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc); if (bwd) dropout_kernel_bwd<<<grid_dim, block_dim, 0, stream>>>( total_count, ratio, vals, out, mask, seed); else 
dropout_kernel<<<grid_dim, block_dim, 0, stream>>>( total_count, ratio, out, vals, mask, seed); } template void launch_dropout(float* out, const float* vals, uint8_t* mask, int total_count, int dim, float ratio, cudaStream_t stream, bool); template void launch_dropout(__half* out, const __half* vals, uint8_t* mask, int total_count, int dim, float ratio, cudaStream_t stream, bool); __global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) { CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } } __global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) { const __half2 h_scale = __float2half2_rn(scale); float2* x_cast = reinterpret_cast<float2*>(Xdata); uint32_t* mask_cast = reinterpret_cast<uint32_t*>(mask); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_data = x_cast[j]; uint32_t m_32 = mask_cast[j]; uint8_t* m = (uint8_t*)&m_32; float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); #ifdef __STOCHASTIC_MODE__ __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); __half2 mask_h[2]; float2 mask_f[2]; float* mask_f_data = &mask_f[0].x; #pragma unroll for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); mask_h[0] = __float22half2_rn(mask_f[0]); mask_h[1] = __float22half2_rn(mask_f[1]); result_h[0] = x_data_h[0] * h_scale * mask_h[0]; result_h[1] = x_data_h[1] * h_scale * mask_h[1]; #else __half* x_data_h = reinterpret_cast<__half*>(&x_data); float2 result[2]; result[0].x = (float)x_data_h[0] * scale * m[0]; result[0].y = (float)x_data_h[1] * scale * m[1]; result[1].x = (float)x_data_h[2] * scale * m[2]; result[1].y = (float)x_data_h[3] * scale * m[3]; result_h[0] = __float22half2_rn(result[0]); result_h[1] = __float22half2_rn(result[1]); #endif x_cast[j] = result_f; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); } } } template <typename T> void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream) { assert(unroll_factor == 4); const float scale = 1. / (1. 
- ratio); dropout_grad_kernel<<<DS_GET_BLOCKS(total_count / unroll_factor), DS_CUDA_NUM_THREADS, 0, stream>>>(total_count, scale, vals, mask); } template void launch_dropout_grad(float* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream); template void launch_dropout_grad(__half* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream); __global__ void dropout_grad_kernel(const int N, const float scale, const float* Xdata, float* out, uint8_t* mask) { CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } } __global__ void dropout_grad_kernel(const int N, const float scale, const __half* Xdata, __half* out, uint8_t* mask) { const float2* x_cast = reinterpret_cast<const float2*>(Xdata); float2* out_cast = reinterpret_cast<float2*>(out); const uint32_t* mask_cast = reinterpret_cast<const uint32_t*>(mask); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) { float2 x_data = x_cast[j]; uint32_t m_32 = mask_cast[j]; uint8_t* m = (uint8_t*)&m_32; __half* x_data_h = reinterpret_cast<__half*>(&x_data); float2 result[2]; result[0].x = (float)x_data_h[0] * scale * m[0]; result[0].y = (float)x_data_h[1] * scale * m[1]; result[1].x = (float)x_data_h[2] * scale * m[2]; result[1].y = (float)x_data_h[3] * scale * m[3]; result_h[0] = __float22half2_rn(result[0]); result_h[1] = __float22half2_rn(result[1]); out_cast[j] = result_f; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { for (int i = high_index; i < N; i++) { out[i] = __float2half((float)Xdata[i] * scale * mask[i]); } } } template <typename T> void launch_dropout_grad(T* vals_out, const T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream) { assert(unroll_factor == 4); const float scale = 1. / (1. - ratio); dropout_grad_kernel<<<DS_GET_BLOCKS(total_count / unroll_factor), DS_CUDA_NUM_THREADS, 0, stream>>>(total_count, scale, vals, vals_out, mask); } template void launch_dropout_grad(float*, const float* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream); template void launch_dropout_grad(__half*, const __half* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream); __global__ void dropout_kernel(const int N, const int dim, const float ratio, const float* bias, float* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. 
- ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); curandStatePhilox4_32_10_t state; curand_init(seed.first, idx, seed.second, &state); float4* Xdata_cast = reinterpret_cast<float4*>(Xdata); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float4* bias_cast = reinterpret_cast<const float4*>(bias); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = curand_uniform4(&state); uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); float4 x_data = Xdata_cast[j]; float4 b_data = bias_cast[j % (dim / unroll_factor)]; x_data.x += b_data.x; x_data.y += b_data.y; x_data.z += b_data.z; x_data.w += b_data.w; x_data.x = x_data.x * scale * m[0]; x_data.y = x_data.y * scale * m[1]; x_data.z = x_data.z * scale * m[2]; x_data.w = x_data.w * scale * m[3]; mask_32[j] = m_32; Xdata_cast[j] = x_data; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = curand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = Xdata[i] + bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); Xdata[i] = x_data * scale * m; mask[i] = m; } } } __global__ void dropout_kernel(const int N, const int dim, const float ratio, const __half* bias, __half* Xdata, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); curandStatePhilox4_32_10_t state; curand_init(seed.first, idx, seed.second, &state); float2* Xdata_cast = reinterpret_cast<float2*>(Xdata); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float2* bias_cast = reinterpret_cast<const float2*>(bias); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = curand_uniform4(&state); float2 data_f; __half2* data_h = reinterpret_cast<__half2*>(&data_f); float2 bias_f; __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); data_f = Xdata_cast[j]; bias_f = bias_cast[j % (dim / unroll_factor)]; float2 data_h_0 = __half22float2(data_h[0]); float2 data_h_1 = __half22float2(data_h[1]); float2 bias_h_0 = __half22float2(bias_h[0]); float2 bias_h_1 = __half22float2(bias_h[1]); data_h_0.x += bias_h_0.x; data_h_0.y += bias_h_0.y; data_h_1.x += bias_h_1.x; data_h_1.y += bias_h_1.y; uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); data_h_0.x = __float2half(data_h_0.x * scale * m[0]); data_h_0.y = __float2half(data_h_0.y * scale * m[1]); data_h_1.x = __float2half(data_h_1.x * scale * m[2]); data_h_1.y = __float2half(data_h_1.y * scale * m[3]); float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); result_h[0] = __float22half2_rn(data_h_0); result_h[1] = __float22half2_rn(data_h_1); Xdata_cast[j] = result_f; mask_32[j] = m_32; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = curand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = (float)Xdata[i] + (float)bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); Xdata[i] = __float2half(x_data * scale * m); mask[i] = m; } } } template <typename T> void 
launch_dropout(T* out, const T* bias, uint8_t* mask, int batch, int dim, float ratio, cudaStream_t stream) { assert(unroll_factor == 4); int total_count = batch * dim / unroll_factor; dim3 grid_dim = DS_GET_BLOCKS(total_count); dim3 block_dim = DS_CUDA_NUM_THREADS; uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc); dropout_kernel<<<grid_dim, block_dim, 0, stream>>>( total_count, dim, ratio, bias, out, mask, seed); } template void launch_dropout(float*, const float* bias, uint8_t* mask, int batch, int dim, float ratio, cudaStream_t stream); template void launch_dropout(__half*, const __half* bias, uint8_t* mask, int batch, int dim, float ratio, cudaStream_t stream); __global__ void dropout_kernel(const int N, const int dim, const float ratio, const float* input, const float* residual, const float* bias, float* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. - ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); curandStatePhilox4_32_10_t state; curand_init(seed.first, idx, seed.second, &state); float4* out_cast = reinterpret_cast<float4*>(out); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float4* bias_cast = reinterpret_cast<const float4*>(bias); const float4* residual_cast = reinterpret_cast<const float4*>(residual); const float4* input_cast = reinterpret_cast<const float4*>(input); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = curand_uniform4(&state); uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); float4 out_data; float4 b_data = bias_cast[j % (dim / unroll_factor)]; float4 res_data = residual_cast[j]; float4 inp_data = input_cast[j]; out_data.x = (b_data.x + inp_data.x); out_data.y = (b_data.y + inp_data.y); out_data.z = (b_data.z + inp_data.z); out_data.w = (b_data.w + inp_data.w); out_data.x = out_data.x * scale * m[0]; out_data.y = out_data.y * scale * m[1]; out_data.z = out_data.z * scale * m[2]; out_data.w = out_data.w * scale * m[3]; out_data.x += res_data.x; out_data.y += res_data.y; out_data.z += res_data.z; out_data.w += res_data.w; mask_32[j] = m_32; out_cast[j] = out_data; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = curand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = input[i] + bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); x_data = x_data * scale * m; x_data += residual[i]; out[i] = x_data; mask[i] = m; } } } __global__ void dropout_kernel(const int N, const int dim, const float ratio, const __half* input, const __half* residual, const __half* bias, __half* out, uint8_t* mask, std::pair<uint64_t, uint64_t> seed) { const float scale = 1. / (1. 
- ratio); int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x % (dim / unroll_factor); curandStatePhilox4_32_10_t state; curand_init(seed.first, idx, seed.second, &state); float2* out_cast = reinterpret_cast<float2*>(out); uint32_t* mask_32 = reinterpret_cast<uint32_t*>(mask); const float2* bias_cast = reinterpret_cast<const float2*>(bias); const float2* residual_cast = reinterpret_cast<const float2*>(residual); const float2* input_cast = reinterpret_cast<const float2*>(input); CUDA_1D_KERNEL_LOOP(j, N) { float4 rand = curand_uniform4(&state); float2 data_f; __half2* data_h = reinterpret_cast<__half2*>(&data_f); float2 bias_f; __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); float2 residual_f; __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); float2 input_f; __half2* input_h = reinterpret_cast<__half2*>(&input_f); bias_f = bias_cast[j % (dim / unroll_factor)]; residual_f = residual_cast[j]; input_f = input_cast[j]; float2 data_h_0 = __half22float2(data_h[0]); float2 data_h_1 = __half22float2(data_h[1]); float2 bias_h_0 = __half22float2(bias_h[0]); float2 bias_h_1 = __half22float2(bias_h[1]); float2 residual_h_0 = __half22float2(residual_h[0]); float2 residual_h_1 = __half22float2(residual_h[1]); float2 input_h_0 = __half22float2(input_h[0]); float2 input_h_1 = __half22float2(input_h[1]); data_h_0.x = (bias_h_0.x + input_h_0.x); data_h_0.y = (bias_h_0.y + input_h_0.y); data_h_1.x = (bias_h_1.x + input_h_1.x); data_h_1.y = (bias_h_1.y + input_h_1.y); uint32_t m_32; uint8_t* m = (uint8_t*)&m_32; m[0] = (uint8_t)(rand.x > ratio); m[1] = (uint8_t)(rand.y > ratio); m[2] = (uint8_t)(rand.z > ratio); m[3] = (uint8_t)(rand.w > ratio); data_h_0.x = __float2half(data_h_0.x * scale * m[0]); data_h_0.y = __float2half(data_h_0.y * scale * m[1]); data_h_1.x = __float2half(data_h_1.x * scale * m[2]); data_h_1.y = __float2half(data_h_1.y * scale * m[3]); data_h_0.x += residual_h_0.x; data_h_0.y += residual_h_0.y; data_h_1.x += residual_h_1.x; data_h_1.y += residual_h_1.y; float2 result_f; __half2* result_h = reinterpret_cast<__half2*>(&result_f); result_h[0] = __float22half2_rn(data_h_0); result_h[1] = __float22half2_rn(data_h_1); out_cast[j] = result_f; mask_32[j] = m_32; } int high_index = ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; if (N > high_index) { float4 rand = curand_uniform4(&state); float* rand_data = &(rand.x); int k = 0; for (int i = high_index; i < N; i++) { float x_data = (float)input[i] + (float)bias[i % dim]; uint8_t m = (uint8_t)(rand_data[k++] > ratio); x_data = x_data * scale * m; x_data += (float)residual[i]; out[i] = __float2half(x_data); mask[i] = m; } } } template <typename T> void launch_dropout(T* out, const T* input, const T* residual, const T* bias, uint8_t* mask, int batch, int dim, float ratio, cudaStream_t stream) { assert(unroll_factor == 4); int total_count = batch * dim / unroll_factor; dim3 grid_dim = DS_GET_BLOCKS(total_count); dim3 block_dim = DS_CUDA_NUM_THREADS; uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; std::pair<uint64_t, uint64_t> seed = Context::Instance().IncrementOffset(inc); dropout_kernel<<<grid_dim, block_dim, 0, stream>>>( total_count, dim, ratio, input, residual, bias, out, mask, seed); } template void launch_dropout(float*, const float*, const float* residual, const float* bias, uint8_t* mask, int batch, int dim, float ratio, cudaStream_t stream); template void launch_dropout(__half*, const __half*, const __half* residual, const __half* bias, uint8_t* 
mask, int batch, int dim, float ratio, cudaStream_t stream);
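Both forward launchers derive their randomness from Context::Instance().IncrementOffset(inc), which returns a (seed, offset) pair that the kernels pass to curand_init(seed.first, idx, seed.second, &state), so successive launches keep drawing from fresh Philox sub-sequences. A minimal stand-in with the same contract is sketched below purely to illustrate that bookkeeping; it is not DeepSpeed's Context class, and the name is hypothetical.

#include <cstdint>
#include <utility>

// Hypothetical seed provider: a fixed seed plus a running offset that advances by the
// number of counter increments each launch is expected to consume (the `inc` above).
class PhiloxSeedProvider {
public:
    explicit PhiloxSeedProvider(uint64_t seed) : seed_(seed), offset_(0) {}

    // Mirrors the IncrementOffset contract used above: hand out the current offset,
    // then advance it so the next launch starts on an unused sub-sequence.
    std::pair<uint64_t, uint64_t> IncrementOffset(uint64_t inc) {
        const uint64_t current = offset_;
        offset_ += inc;
        return {seed_, current};
    }

private:
    uint64_t seed_;
    uint64_t offset_;
};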
1967fd0dac5f629b6461b7f20080da7cd2035d48.hip
// !!! This is a file automatically generated by hipify!!! #include "DeviceManager.h" #include <hip/hip_runtime_api.h> #include <cmath> #include <cstdlib> #include <helper_cuda.h> #include <hiprand/hiprand_kernel.h> #include "RayTracer.h" #include "Setting.h" #include "RTSampler.h" #include <iostream> #include "Float2Byte.h" #include "Engine.h" #include "Material.h" #include "Scene.h" DeviceManager::DeviceManager(){} DeviceManager::~DeviceManager(){} void DeviceManager::PrintDeviceInfo() { auto device_count = 0; hipGetDeviceCount(&device_count); if (device_count == 0) { printf("No CUDA-capable device found!\n"); return; } for (auto dev = 0; dev < device_count; dev++) { hipSetDevice(dev); hipDeviceProp_t device_prop{}; hipGetDeviceProperties(&device_prop, dev); printf("Device %d: \"%s\"\n", dev, device_prop.name); char msg[256]; sprintf_s(msg, sizeof(msg), "Global memory size: %.0f MBytes " "(%llu bytes)\n", static_cast<float>(device_prop.totalGlobalMem / 1048576.0f), static_cast<unsigned long long>(device_prop.totalGlobalMem)); printf("%s", msg); printf("Number of SMs: %2d \nCUDA cores per SM: %3d \nTotal CUDA cores: %d \n", device_prop.multiProcessorCount, _ConvertSMVer2Cores(device_prop.major, device_prop.minor), _ConvertSMVer2Cores(device_prop.major, device_prop.minor) * device_prop.multiProcessorCount); printf("Constant memory size: %zu bytes\n", device_prop.totalConstMem); printf("Shared memory per block: %zu bytes\n", device_prop.sharedMemPerBlock); printf("Registers per block: %d\n", device_prop.regsPerBlock); printf("Warp size: %d\n", device_prop.warpSize); printf("Max threads per multiprocessor: %d\n", device_prop.maxThreadsPerMultiProcessor); printf("Max threads per block: %d\n", device_prop.maxThreadsPerBlock); printf("Max thread block dimensions (%d, %d, %d)\n", device_prop.maxThreadsDim[0], device_prop.maxThreadsDim[1], device_prop.maxThreadsDim[2]); printf("Max grid dimensions (%d, %d, %d)\n", device_prop.maxGridSize[0], device_prop.maxGridSize[1], device_prop.maxGridSize[2]); printf("\n"); } printf("************ Device info printed ************\n\n"); } void DeviceManager::Init(RayTracer* tracer,HostScene scene) { ray_tracer = tracer; grid = dim3(ray_tracer->width / Setting::BlockSize, ray_tracer->height / Setting::BlockSize); block = dim3(Setting::BlockSize, Setting::BlockSize); const size_t newHeapSize = 4608ull * 1024ull * 1024ull; hipDeviceSetLimit(hipLimitStackSize, newHeapSize); host_float_data = new float[ray_tracer->width * ray_tracer->height * 4]; hipMalloc(reinterpret_cast<void**>(&devicde_float_data), ray_tracer->width * ray_tracer->height * 4 * sizeof(float)); hipMalloc(reinterpret_cast<void**>(&devicde_byte_data), ray_tracer->width * ray_tracer->height * 4 * sizeof(GLbyte)); hipMalloc(reinterpret_cast<void**>(&rng_states), grid.x * block.x * sizeof(hiprandState_t)); hipMalloc(reinterpret_cast<void**>(&d_camera), sizeof(Camera)); d_data = RTHostData(); hipMalloc(reinterpret_cast<void**>(&d_data.Materials), sizeof(Material) * ray_tracer->material_count); for (int i = 0; i < 1; i++) d_data.Textures[i] = ray_tracer->textlist[i]; printf(BLU"[GPU]" YEL"Transferring BVH Data...\n" RESET); d_data.bvh = ToDevice(HostScene::Instance()->bvh); printf(BLU"[GPU]" GRN"Transfer BVH Data Completed\n" RESET); } void DeviceManager::Run() { //****** Copy input memory host->device ****** hipMemcpy(devicde_float_data, host_float_data, ray_tracer->width * ray_tracer->height * 4 * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_camera, Engine::Instance()->camera, sizeof(Camera), hipMemcpyHostToDevice); hipMemcpy(d_data.Materials, ray_tracer->materials, sizeof(Material) * ray_tracer->material_count, hipMemcpyHostToDevice); d_data.quick = ray_tracer->IPR_Quick; auto mst = 0; auto spp = 0; if (d_data.quick || ray_tracer->IPR_reset_once) {
mst = 3; spp = 1; } else { mst = 8; spp = Setting::SPP; } ray_tracer->sampled += spp; IPRSampler << <grid, block >> > (ray_tracer->width, ray_tracer->height, (rand() / (RAND_MAX + 1.0)) * 1000, spp, ray_tracer->sampled, mst, 0, devicde_float_data, rng_states, d_camera,d_data); Float2Byte <<<grid, block >> > (d_data.quick,ray_tracer->width, ray_tracer->sampled,spp, devicde_float_data, devicde_byte_data); hipDeviceSynchronize(); //****** Device->host ****** hipMemcpy(host_float_data, devicde_float_data, ray_tracer->width * ray_tracer->height * 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(ray_tracer->data, devicde_byte_data, ray_tracer->width * ray_tracer->height * 4 * sizeof(GLbyte), hipMemcpyDeviceToHost); const auto error = hipGetLastError(); if (error != 0)printf(RED"[ERROR]Cuda Error %d\n" RESET, error); }
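Run() above finishes by reading the last runtime error and printing only its numeric code. A small helper that also prints the human-readable message is sketched below; the name and placement are assumptions for illustration, written with the CUDA spellings of the original .cu that follows (the HIP build would use hipGetLastError and hipGetErrorString).

#include <cstdio>
#include <cuda_runtime_api.h>

// Hypothetical helper: report the most recent runtime error with its message text.
inline void report_last_error(const char* where)
{
    const cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::printf("[ERROR] %s: %s (%d)\n", where, cudaGetErrorString(err), static_cast<int>(err));
    }
}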
1967fd0dac5f629b6461b7f20080da7cd2035d48.cu
#include "DeviceManager.h" #include <cuda_runtime_api.h> #include <cmath> #include <cstdlib> #include <helper_cuda.h> #include <curand_kernel.h> #include "RayTracer.h" #include "Setting.h" #include "RTSampler.h" #include <iostream> #include "Float2Byte.h" #include "Engine.h" #include "Material.h" #include "Scene.h" DeviceManager::DeviceManager(){} DeviceManager::~DeviceManager(){} void DeviceManager::PrintDeviceInfo() { auto device_count = 0; cudaGetDeviceCount(&device_count); if (device_count == 0) { printf("没有支持CUDA的设备!\n"); return; } for (auto dev = 0; dev < device_count; dev++) { cudaSetDevice(dev); cudaDeviceProp device_prop{}; cudaGetDeviceProperties(&device_prop, dev); printf("设备 %d: \"%s\"\n", dev, device_prop.name); char msg[256]; sprintf_s(msg, sizeof(msg), "global memory大小: %.0f MBytes " "(%llu bytes)\n", static_cast<float>(device_prop.totalGlobalMem / 1048576.0f), static_cast<unsigned long long>(device_prop.totalGlobalMem)); printf("%s", msg); printf("SM数: %2d \n每SM CUDA核心数: %3d \n总CUDA核心数: %d \n", device_prop.multiProcessorCount, _ConvertSMVer2Cores(device_prop.major, device_prop.minor), _ConvertSMVer2Cores(device_prop.major, device_prop.minor) * device_prop.multiProcessorCount); printf("静态内存大小: %zu bytes\n", device_prop.totalConstMem); printf("每block共享内存大小: %zu bytes\n", device_prop.sharedMemPerBlock); printf("每block寄存器数: %d\n", device_prop.regsPerBlock); printf("线程束大小: %d\n", device_prop.warpSize); printf("每处理器最大线程数: %d\n", device_prop.maxThreadsPerMultiProcessor); printf("每block最大线程数: %d\n", device_prop.maxThreadsPerBlock); printf("线程块最大维度大小 (%d, %d, %d)\n", device_prop.maxThreadsDim[0], device_prop.maxThreadsDim[1], device_prop.maxThreadsDim[2]); printf("网格最大维度大小 (%d, %d, %d)\n", device_prop.maxGridSize[0], device_prop.maxGridSize[1], device_prop.maxGridSize[2]); printf("\n"); } printf("************设备信息打印完毕************\n\n"); } void DeviceManager::Init(RayTracer* tracer,HostScene scene) { ray_tracer = tracer; grid = dim3(ray_tracer->width / Setting::BlockSize, ray_tracer->height / Setting::BlockSize); block = dim3(Setting::BlockSize, Setting::BlockSize); const size_t newHeapSize = 4608ull * 1024ull * 1024ull;; cudaDeviceSetLimit(cudaLimitStackSize, newHeapSize); host_float_data = new float[ray_tracer->width * ray_tracer->height * 4]; cudaMalloc(reinterpret_cast<void**>(&devicde_float_data), ray_tracer->width * ray_tracer->height * 4 * sizeof(float)); cudaMalloc(reinterpret_cast<void**>(&devicde_byte_data), ray_tracer->width * ray_tracer->height * 4 * sizeof(GLbyte)); cudaMalloc(reinterpret_cast<void**>(&rng_states), grid.x * block.x * sizeof(curandState)); cudaMalloc(reinterpret_cast<void**>(&d_camera), sizeof(Camera)); d_data = RTHostData(); cudaMalloc(reinterpret_cast<void**>(&d_data.Materials), sizeof(Material) * ray_tracer->material_count); for (int i = 0; i < 1; i++) d_data.Textures[i] = ray_tracer->textlist[i]; printf(BLU"[GPU]" YEL"Transferring BVH Data...\n" RESET); d_data.bvh = ToDevice(HostScene::Instance()->bvh); printf(BLU"[GPU]" GRN"Transfer BVH Data Completed\n" RESET); } void DeviceManager::Run() { //****** 复制输入内存 host->device ****** cudaMemcpy(devicde_float_data, host_float_data, ray_tracer->width * ray_tracer->height * 4 * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_camera, Engine::Instance()->camera, sizeof(Camera), cudaMemcpyHostToDevice); cudaMemcpy(d_data.Materials, ray_tracer->materials, sizeof(Material) * ray_tracer->material_count, cudaMemcpyHostToDevice); d_data.quick = ray_tracer->IPR_Quick; auto mst = 0; auto spp = 0; if (d_data.quick || 
ray_tracer->IPR_reset_once) { mst = 3; spp = 1; } else { mst = 8; spp = Setting::SPP; } ray_tracer->sampled += spp; IPRSampler << <grid, block >> > (ray_tracer->width, ray_tracer->height, (rand() / (RAND_MAX + 1.0)) * 1000, spp, ray_tracer->sampled, mst, 0, devicde_float_data, rng_states, d_camera,d_data); Float2Byte <<<grid, block >> > (d_data.quick,ray_tracer->width, ray_tracer->sampled,spp, devicde_float_data, devicde_byte_data); cudaDeviceSynchronize(); //****** 复制输出内存 Device->host ****** cudaMemcpy(host_float_data, devicde_float_data, ray_tracer->width * ray_tracer->height * 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(ray_tracer->data, devicde_byte_data, ray_tracer->width * ray_tracer->height * 4 * sizeof(GLbyte), cudaMemcpyDeviceToHost); const auto error = cudaGetLastError(); if (error != 0)printf(RED"[ERROR]Cuda Error %d\n" RESET, error); }
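Init() above sizes the launch grid as width / BlockSize by height / BlockSize, which drops the remainder whenever the frame is not an exact multiple of Setting::BlockSize. The usual ceiling-division form is sketched below for reference; the helper name and the default block edge of 16 are illustrative assumptions, and kernels launched this way would also need an in-bounds check for the padding threads.

#include <cuda_runtime.h>

// Hypothetical helper: round the grid up so every pixel is covered by a block.
inline dim3 make_full_coverage_grid(int width, int height, int block_edge = 16)
{
    return dim3((width + block_edge - 1) / block_edge,
                (height + block_edge - 1) / block_edge);
}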
6ab528547e0f55ba87a97bfc64d87488a578dee1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/device_functions.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> #include <chrono> #include "ProjectDefinitions.h" using namespace std::chrono; #ifdef USE_ROCM extern "C" hipError_t CheckForError(char * str); __global__ void naive_GPU_FindValuesAboveThreshold3DPredict( char* d_response, const float* d_signal, const float* d_threshold, uint32_t signalLength, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength - templateLength)) { if (d_signal[index + (templateId*signalLength)] >= d_threshold[templateId]) { d_response[index + (templateId*signalLength)] = 1; } else { d_response[index + (templateId*signalLength)] = 0; } } } __global__ void naive_GPU_FindValuesAboveThreshold3D( char* d_response, const float* d_signal, float threshold, uint32_t signalLength, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength-templateLength)) { if (d_signal[index + (templateId*signalLength)] >= threshold) { d_response[index + (templateId*signalLength)] = 1; } else { d_response[index + (templateId*signalLength)] = 0; } } } __global__ void naive_GPU_FindPeaks3D( const float* d_signal, char* aboveThresholdindicator, uint32_t signalLength, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength - templateLength)) { // Assign first and last element first if (index > 1 || index < ((signalLength - templateLength) - 1)) { if (aboveThresholdindicator[index + (templateId*signalLength)] > 0) { if (d_signal[index + (templateId*signalLength)] > d_signal[index + (templateId*signalLength) - 1] && d_signal[index + (templateId*signalLength)] >= d_signal[index + (templateId*signalLength) + 1]) { //numberOfPeaks++; } else { aboveThresholdindicator[index + (templateId*signalLength)] = 0; } } } else { if (index < 1) { if (d_signal[index + (templateId*signalLength)] > d_signal[index + (templateId*signalLength) + 1] && aboveThresholdindicator[index + (templateId*signalLength)] > 0) { //numberOfPeaks++; } else { aboveThresholdindicator[index + (templateId*signalLength)] = 0; } } if (index > ((signalLength - templateLength) - 2)) { if (d_signal[index + (templateId*signalLength)] > d_signal[index + (templateId*signalLength) - 1] && aboveThresholdindicator[index + (templateId*signalLength)] > 0) { //numberOfPeaks++; } else { aboveThresholdindicator[index + (templateId*signalLength)] = 0; } } } } } __global__ void naive_GPU_MakesFoundTimes3D( uint32_t* dev_result, char* aboveThresholdindicator, uint32_t signalLength, uint32_t maxDimOfResult, uint32_t* dev_counter, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength - templateLength)) { // Assign first and last element first if (aboveThresholdindicator[index + (templateId*signalLength)] > 0) { register uint32_t i = atomicAdd(&dev_counter[templateId], 1); if (i < maxDimOfResult) { dev_result[i + (templateId*maxDimOfResult)] = index; } } } } __global__ void naive_compare_with_truth_table3D( uint32_t* d_TPCounter, uint32_t* d_truthTable, uint32_t* d_estimationTable, uint32_t* d_truthTableStartInd, uint32_t* d_truthTableStartSize, uint32_t* d_estimationTableSize, uint16_t* 
d_peakOffset, uint32_t maxDimOfResult ) { bool TP = false; uint32_t offsetSpike = 0; uint32_t I = threadIdx.x + (blockIdx.x*blockDim.x); // e.g threadIdx.x = 2, blockIdx.x = 4, blockDim.c = 1024 --> (4*1024)+2 = 4098 uint16_t templateId = blockIdx.y; if (TEMPLATE_CROPPED_LENGTH > ((d_peakOffset[templateId] * 2) + 1)) { offsetSpike = d_peakOffset[templateId]; } else { offsetSpike = (d_peakOffset[templateId] / 2); } if (I < d_estimationTableSize[templateId]) { bool timeStampLocated = false; for (uint32_t i = d_truthTableStartInd[templateId]; i < (d_truthTableStartInd[templateId] + d_truthTableStartSize[templateId]); i++) { if ((d_estimationTable[I + (templateId*maxDimOfResult)] + offsetSpike) == (d_truthTable[i] - 1)) { TP = true; timeStampLocated = true; break; } } if (!timeStampLocated && ACCEPTED_TIMELINE_SLACK > 0) { for (uint32_t Y = 1; Y <= ACCEPTED_TIMELINE_SLACK; Y++) { for (uint32_t i = d_truthTableStartInd[templateId]; i < (d_truthTableStartInd[templateId] + d_truthTableStartSize[templateId]); i++) { if ((d_estimationTable[I + (templateId*maxDimOfResult)] + offsetSpike) == ((d_truthTable[i] - 1) - Y)) { TP = true; timeStampLocated = true; break; } } if (timeStampLocated) { break; } if (!timeStampLocated) { for (uint32_t i = d_truthTableStartInd[templateId]; i < (d_truthTableStartInd[templateId] + d_truthTableStartSize[templateId]); i++) { if ((d_estimationTable[I + (templateId*maxDimOfResult)] + offsetSpike) == ((d_truthTable[i] - 1) + Y)) { TP = true; timeStampLocated = true; break; } } } if (timeStampLocated) { break; } } } } if (TP) { atomicAdd(&d_TPCounter[templateId], 1); } } extern "C" void PredictCUDA(const float *dev_signal, char *dev_aboveThreshold, uint32_t *dev_foundTimes, uint32_t *dev_foundTimesCounter, uint16_t templateLength, uint32_t signalLength, uint16_t numberOfTemplates, float *dev_threshold) { uint32_t GridXSize = signalLength / MAXIMUM_NUMBER_OF_THREADS; if (signalLength % MAXIMUM_NUMBER_OF_THREADS != 0) { GridXSize++; } const dim3 blockSize(MAXIMUM_NUMBER_OF_THREADS, 1, 1); const dim3 gridsize(GridXSize, numberOfTemplates, 1); naive_GPU_FindValuesAboveThreshold3DPredict << <gridsize, blockSize >> > (dev_aboveThreshold, dev_signal, dev_threshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindValuesAboveThreshold3DPredict"); naive_GPU_FindPeaks3D << <gridsize, blockSize >> > (dev_signal, dev_aboveThreshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindPeaks3D"); naive_GPU_MakesFoundTimes3D << <gridsize, blockSize >> > (dev_foundTimes, dev_aboveThreshold, signalLength, (uint32_t)MAXIMUM_PREDICTION_SAMPLES, dev_foundTimesCounter, templateLength); CheckForError((char*)"naive_GPU_MakesFoundTimes3D"); } extern "C" void TrainPart1CUDA(const float *dev_signal, char *dev_aboveThreshold, uint32_t *dev_foundTimes, uint32_t *dev_foundTimesCounter, uint32_t *dev_TPCounter, uint16_t *dev_peaksOffsets, uint32_t *devTruthTable, uint32_t *devTruthTableSize, uint32_t *devTruthTableStartInd, uint16_t templateLength, uint32_t signalLength, uint16_t numberOfTemplates, float threshold) { uint32_t GridXSize = signalLength / MAXIMUM_NUMBER_OF_THREADS; if (signalLength % MAXIMUM_NUMBER_OF_THREADS != 0) { GridXSize++; } const dim3 blockSize(MAXIMUM_NUMBER_OF_THREADS, 1, 1); const dim3 gridsize(GridXSize, numberOfTemplates, 1); naive_GPU_FindValuesAboveThreshold3D << <gridsize, blockSize >> > (dev_aboveThreshold, dev_signal, threshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindValuesAboveThreshold3D"); 
naive_GPU_FindPeaks3D << <gridsize, blockSize >> > (dev_signal, dev_aboveThreshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindPeaks3D"); naive_GPU_MakesFoundTimes3D << <gridsize, blockSize >> > (dev_foundTimes, dev_aboveThreshold, signalLength, (uint32_t)MAXIMUM_PREDICTION_SAMPLES, dev_foundTimesCounter, templateLength); CheckForError((char*)"naive_GPU_MakesFoundTimes3D"); const dim3 blockSizeCompare(MAXIMUM_NUMBER_OF_THREADS_COMPARING, 1, 1); GridXSize = MAXIMUM_PREDICTION_SAMPLES / MAXIMUM_NUMBER_OF_THREADS_COMPARING; if (MAXIMUM_PREDICTION_SAMPLES % MAXIMUM_NUMBER_OF_THREADS_COMPARING != 0) { GridXSize++; } const dim3 gridsizeCompare(GridXSize, numberOfTemplates, 1); naive_compare_with_truth_table3D << <gridsizeCompare, blockSizeCompare >> > (dev_TPCounter, devTruthTable, dev_foundTimes, devTruthTableStartInd, devTruthTableSize, dev_foundTimesCounter, dev_peaksOffsets, (uint32_t)MAXIMUM_PREDICTION_SAMPLES); CheckForError((char*)"naive_compare_with_truth_table3D"); } #endif
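PredictCUDA above expects its per-template buffers to be laid out template-major, as the index + templateId * signalLength and i + templateId * maxDimOfResult addressing in the kernels shows, and the found-times counters are accumulated with atomicAdd, so they must start at zero. A minimal host-side allocation sketch is given below; the buffer names, the cudaMalloc/cudaMemset setup, and the wrapper function are assumptions for illustration (the ROCm build of this file would use the hip* equivalents), not code from the project.

#include <cstdint>
#include <cuda_runtime_api.h>
#include "ProjectDefinitions.h"  // assumed to define MAXIMUM_PREDICTION_SAMPLES

extern "C" void PredictCUDA(const float* dev_signal, char* dev_aboveThreshold,
                            uint32_t* dev_foundTimes, uint32_t* dev_foundTimesCounter,
                            uint16_t templateLength, uint32_t signalLength,
                            uint16_t numberOfTemplates, float* dev_threshold);

// Sketch: allocate the template-major work buffers and run one prediction pass.
void predict_sketch(const float* d_signal, float* d_threshold,
                    uint32_t signalLength, uint16_t numberOfTemplates, uint16_t templateLength)
{
    char*     d_above  = nullptr;  // one above-threshold flag per sample per template
    uint32_t* d_times  = nullptr;  // up to MAXIMUM_PREDICTION_SAMPLES peak indices per template
    uint32_t* d_counts = nullptr;  // one atomic hit counter per template
    cudaMalloc(reinterpret_cast<void**>(&d_above),  (size_t)signalLength * numberOfTemplates * sizeof(char));
    cudaMalloc(reinterpret_cast<void**>(&d_times),  (size_t)MAXIMUM_PREDICTION_SAMPLES * numberOfTemplates * sizeof(uint32_t));
    cudaMalloc(reinterpret_cast<void**>(&d_counts), (size_t)numberOfTemplates * sizeof(uint32_t));
    cudaMemset(d_counts, 0, (size_t)numberOfTemplates * sizeof(uint32_t));

    PredictCUDA(d_signal, d_above, d_times, d_counts,
                templateLength, signalLength, numberOfTemplates, d_threshold);

    cudaFree(d_above);
    cudaFree(d_times);
    cudaFree(d_counts);
}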
6ab528547e0f55ba87a97bfc64d87488a578dee1.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <device_functions.h> #include <cuda.h> #include <stdio.h> #include <iostream> #include <chrono> #include "ProjectDefinitions.h" using namespace std::chrono; #ifdef USE_CUDA extern "C" cudaError_t CheckForError(char * str); __global__ void naive_GPU_FindValuesAboveThreshold3DPredict( char* d_response, const float* d_signal, const float* d_threshold, uint32_t signalLength, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength - templateLength)) { if (d_signal[index + (templateId*signalLength)] >= d_threshold[templateId]) { d_response[index + (templateId*signalLength)] = 1; } else { d_response[index + (templateId*signalLength)] = 0; } } } __global__ void naive_GPU_FindValuesAboveThreshold3D( char* d_response, const float* d_signal, float threshold, uint32_t signalLength, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength-templateLength)) { if (d_signal[index + (templateId*signalLength)] >= threshold) { d_response[index + (templateId*signalLength)] = 1; } else { d_response[index + (templateId*signalLength)] = 0; } } } __global__ void naive_GPU_FindPeaks3D( const float* d_signal, char* aboveThresholdindicator, uint32_t signalLength, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength - templateLength)) { // Assign first and last element first if (index > 1 || index < ((signalLength - templateLength) - 1)) { if (aboveThresholdindicator[index + (templateId*signalLength)] > 0) { if (d_signal[index + (templateId*signalLength)] > d_signal[index + (templateId*signalLength) - 1] && d_signal[index + (templateId*signalLength)] >= d_signal[index + (templateId*signalLength) + 1]) { //numberOfPeaks++; } else { aboveThresholdindicator[index + (templateId*signalLength)] = 0; } } } else { if (index < 1) { if (d_signal[index + (templateId*signalLength)] > d_signal[index + (templateId*signalLength) + 1] && aboveThresholdindicator[index + (templateId*signalLength)] > 0) { //numberOfPeaks++; } else { aboveThresholdindicator[index + (templateId*signalLength)] = 0; } } if (index > ((signalLength - templateLength) - 2)) { if (d_signal[index + (templateId*signalLength)] > d_signal[index + (templateId*signalLength) - 1] && aboveThresholdindicator[index + (templateId*signalLength)] > 0) { //numberOfPeaks++; } else { aboveThresholdindicator[index + (templateId*signalLength)] = 0; } } } } } __global__ void naive_GPU_MakesFoundTimes3D( uint32_t* dev_result, char* aboveThresholdindicator, uint32_t signalLength, uint32_t maxDimOfResult, uint32_t* dev_counter, uint16_t templateLength ) { uint32_t index = threadIdx.x + (blockDim.x*blockIdx.x); uint16_t templateId = blockIdx.y; if (index < (signalLength - templateLength)) { // Assign first and last element first if (aboveThresholdindicator[index + (templateId*signalLength)] > 0) { register uint32_t i = atomicAdd(&dev_counter[templateId], 1); if (i < maxDimOfResult) { dev_result[i + (templateId*maxDimOfResult)] = index; } } } } __global__ void naive_compare_with_truth_table3D( uint32_t* d_TPCounter, uint32_t* d_truthTable, uint32_t* d_estimationTable, uint32_t* d_truthTableStartInd, uint32_t* d_truthTableStartSize, uint32_t* d_estimationTableSize, uint16_t* d_peakOffset, uint32_t maxDimOfResult ) { bool TP = false; uint32_t offsetSpike 
= 0; uint32_t I = threadIdx.x + (blockIdx.x*blockDim.x); // e.g threadIdx.x = 2, blockIdx.x = 4, blockDim.c = 1024 --> (4*1024)+2 = 4098 uint16_t templateId = blockIdx.y; if (TEMPLATE_CROPPED_LENGTH > ((d_peakOffset[templateId] * 2) + 1)) { offsetSpike = d_peakOffset[templateId]; } else { offsetSpike = (d_peakOffset[templateId] / 2); } if (I < d_estimationTableSize[templateId]) { bool timeStampLocated = false; for (uint32_t i = d_truthTableStartInd[templateId]; i < (d_truthTableStartInd[templateId] + d_truthTableStartSize[templateId]); i++) { if ((d_estimationTable[I + (templateId*maxDimOfResult)] + offsetSpike) == (d_truthTable[i] - 1)) { TP = true; timeStampLocated = true; break; } } if (!timeStampLocated && ACCEPTED_TIMELINE_SLACK > 0) { for (uint32_t Y = 1; Y <= ACCEPTED_TIMELINE_SLACK; Y++) { for (uint32_t i = d_truthTableStartInd[templateId]; i < (d_truthTableStartInd[templateId] + d_truthTableStartSize[templateId]); i++) { if ((d_estimationTable[I + (templateId*maxDimOfResult)] + offsetSpike) == ((d_truthTable[i] - 1) - Y)) { TP = true; timeStampLocated = true; break; } } if (timeStampLocated) { break; } if (!timeStampLocated) { for (uint32_t i = d_truthTableStartInd[templateId]; i < (d_truthTableStartInd[templateId] + d_truthTableStartSize[templateId]); i++) { if ((d_estimationTable[I + (templateId*maxDimOfResult)] + offsetSpike) == ((d_truthTable[i] - 1) + Y)) { TP = true; timeStampLocated = true; break; } } } if (timeStampLocated) { break; } } } } if (TP) { atomicAdd(&d_TPCounter[templateId], 1); } } extern "C" void PredictCUDA(const float *dev_signal, char *dev_aboveThreshold, uint32_t *dev_foundTimes, uint32_t *dev_foundTimesCounter, uint16_t templateLength, uint32_t signalLength, uint16_t numberOfTemplates, float *dev_threshold) { uint32_t GridXSize = signalLength / MAXIMUM_NUMBER_OF_THREADS; if (signalLength % MAXIMUM_NUMBER_OF_THREADS != 0) { GridXSize++; } const dim3 blockSize(MAXIMUM_NUMBER_OF_THREADS, 1, 1); const dim3 gridsize(GridXSize, numberOfTemplates, 1); naive_GPU_FindValuesAboveThreshold3DPredict << <gridsize, blockSize >> > (dev_aboveThreshold, dev_signal, dev_threshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindValuesAboveThreshold3DPredict"); naive_GPU_FindPeaks3D << <gridsize, blockSize >> > (dev_signal, dev_aboveThreshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindPeaks3D"); naive_GPU_MakesFoundTimes3D << <gridsize, blockSize >> > (dev_foundTimes, dev_aboveThreshold, signalLength, (uint32_t)MAXIMUM_PREDICTION_SAMPLES, dev_foundTimesCounter, templateLength); CheckForError((char*)"naive_GPU_MakesFoundTimes3D"); } extern "C" void TrainPart1CUDA(const float *dev_signal, char *dev_aboveThreshold, uint32_t *dev_foundTimes, uint32_t *dev_foundTimesCounter, uint32_t *dev_TPCounter, uint16_t *dev_peaksOffsets, uint32_t *devTruthTable, uint32_t *devTruthTableSize, uint32_t *devTruthTableStartInd, uint16_t templateLength, uint32_t signalLength, uint16_t numberOfTemplates, float threshold) { uint32_t GridXSize = signalLength / MAXIMUM_NUMBER_OF_THREADS; if (signalLength % MAXIMUM_NUMBER_OF_THREADS != 0) { GridXSize++; } const dim3 blockSize(MAXIMUM_NUMBER_OF_THREADS, 1, 1); const dim3 gridsize(GridXSize, numberOfTemplates, 1); naive_GPU_FindValuesAboveThreshold3D << <gridsize, blockSize >> > (dev_aboveThreshold, dev_signal, threshold, signalLength, templateLength); CheckForError((char*)"naive_GPU_FindValuesAboveThreshold3D"); naive_GPU_FindPeaks3D << <gridsize, blockSize >> > (dev_signal, dev_aboveThreshold, signalLength, 
templateLength); CheckForError((char*)"naive_GPU_FindPeaks3D"); naive_GPU_MakesFoundTimes3D << <gridsize, blockSize >> > (dev_foundTimes, dev_aboveThreshold, signalLength, (uint32_t)MAXIMUM_PREDICTION_SAMPLES, dev_foundTimesCounter, templateLength); CheckForError((char*)"naive_GPU_MakesFoundTimes3D"); const dim3 blockSizeCompare(MAXIMUM_NUMBER_OF_THREADS_COMPARING, 1, 1); GridXSize = MAXIMUM_PREDICTION_SAMPLES / MAXIMUM_NUMBER_OF_THREADS_COMPARING; if (MAXIMUM_PREDICTION_SAMPLES % MAXIMUM_NUMBER_OF_THREADS_COMPARING != 0) { GridXSize++; } const dim3 gridsizeCompare(GridXSize, numberOfTemplates, 1); naive_compare_with_truth_table3D << <gridsizeCompare, blockSizeCompare >> > (dev_TPCounter, devTruthTable, dev_foundTimes, devTruthTableStartInd, devTruthTableSize, dev_foundTimesCounter, dev_peaksOffsets, (uint32_t)MAXIMUM_PREDICTION_SAMPLES); CheckForError((char*)"naive_compare_with_truth_table3D"); } #endif
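// Hedged host-side usage sketch (names and flow are assumptions, not original code): after
// TrainPart1CUDA returns, dev_TPCounter and dev_foundTimesCounter hold per-template counts on the
// device, provided they were zeroed (e.g. via cudaMemset) before the launch. Error checking omitted.
#include <cstdint>
#include <cstdio>
#include <vector>
#include "cuda_runtime.h"

void FetchTrainingCounters(const uint32_t *dev_TPCounter, const uint32_t *dev_foundTimesCounter,
                           uint16_t numberOfTemplates)
{
    std::vector<uint32_t> tp(numberOfTemplates), found(numberOfTemplates);
    cudaMemcpy(tp.data(), dev_TPCounter, numberOfTemplates * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    cudaMemcpy(found.data(), dev_foundTimesCounter, numberOfTemplates * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    for (uint16_t t = 0; t < numberOfTemplates; ++t) {
        // Candidate peaks found above threshold vs. how many matched the truth table within the slack.
        printf("template %u: %u candidates, %u true positives\n",
               (unsigned)t, (unsigned)found[t], (unsigned)tp[t]);
    }
}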
9ff095ff57212a16f2764c51026a748d2319bb31.hip
// !!! This is a file automatically generated by hipify!!! /** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg <[email protected]> * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the deriving author. * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "hipEventDefault", "hipEventBlockingSync", "hipEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "hipDeviceScheduleAuto", "hipDeviceScheduleSpin", "hipDeviceScheduleYield", "INVALID", "hipDeviceScheduleBlockingSync", NULL }; // System includes // CUDA runtime #include "hip/hip_runtime.h" // helper functions and utilities to work with CUDA #include "../helper_cuda.h" #include <cuda/api_wrappers.hpp> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif #include <cstdlib> #include <fstream> #include <vector> #include <iostream> #include <algorithm> // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { std::cout << i << ": " << a[i] << " " << c << "\n"; return false; } } return true; } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { std::cout << "Usage: " << sSDKsample << " [options below]\n" << "\t--sync_method=n for CPU/GPU synchronization\n" << "\t n=" << sSyncMethod[0] << "\n" << "\t n=" << sSyncMethod[1] << "\n" << "\t n=" << sSyncMethod[2] << "\n" << "\t <Default> n=" << sSyncMethod[4] << "\n" << "\t--use_generic_memory (default) use generic page-aligned for system memory\n" << "\t--use_cuda_malloc_host (optional) use hipHostMalloc to allocate system memory\n"; } int main(int argc, char **argv) { int cuda_device_id = 0; int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n 
* sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch configuration float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using hipHostMalloc() int device_sync_method = hipDeviceScheduleBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { std::cout << "Device synchronization method set to = " << sSyncMethod[device_sync_method] << "\n"; std::cout << "Setting reps to 100 to demonstrate steady state\n"; nreps = 100; } else { std::cout << "Invalid command line option sync_method=\"" << device_sync_method << "\"\n"; return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { std::cout << "To simplify this example, support for using cuda_malloc_host instead of " << "pinned memory has been dropped.\n"; return EXIT_FAILURE; } std::cout << "\n> "; cuda_device_id = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device auto num_devices = cuda::device::count(); if ( 0 == num_devices) { std::cerr << "your system does not have a CUDA capable device, waiving test...\n"; return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device_id >= num_devices) { std::cout << "cuda_device=" << cuda_device_id << " is invalid, " << "must choose device ID between 0 and " << num_devices-1 << "\n"; return EXIT_FAILURE; } cuda::device::current::set(cuda_device_id); auto current_device = cuda::device::current::get(); // Checking for compute capabilities auto properties = current_device.properties(); auto compute_capability = properties.compute_capability(); if (compute_capability < cuda::device::compute_capability_t({1, 1}) ) { std::cout << properties.name << " does not have Compute Capability 1.1 or newer. Reducing workload.\n"; } if (compute_capability.major >= 2) { niterations = 5; } else { if (compute_capability.minor > 1) { niterations = 5; } else { niterations = 1; // reduced workload for compute capability 1.0 and 1.1 } } // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false std::cout << "Device: <" << properties.name << "> canMapHostMemory: " << (properties.canMapHostMemory ? "Yes" : "No") << "\n"; if (not properties.can_map_host_memory()) { std::cout << "Cannot allocate pinned memory (and map GPU device memory to it); aborting.\n"; return EXIT_FAILURE; } // Anything that is less than 32 Cores will have scaled down workload auto faux_cores_per_sm = compute_capability.max_in_flight_threads_per_processor(); auto faux_cores_overall = properties.max_in_flight_threads_on_device(); scale_factor = max((32.0f / faux_cores_overall), 1.0f); n = (int)rint((float)n / scale_factor); std::cout << "> CUDA Capable: SM " << compute_capability.major << "." 
<< compute_capability.minor << " hardware\n"; std::cout << "> " << properties.multiProcessorCount << " Multiprocessor(s)" << " x " << faux_cores_per_sm << " (Cores/Multiprocessor) = " << faux_cores_overall << " (Cores)\n"; std::cout << "> scale_factor = " << 1.0f/scale_factor << "\n"; std::cout << "> array_size = " << n << "\n\n"; // enable use of blocking sync, to reduce CPU usage std::cout << "> Using CPU/GPU Device Synchronization method " << sDeviceSyncMethod[device_sync_method] << "\n"; cuda::host_thread_synch_scheduling_policy_t policy; switch(device_sync_method) { case 0: policy = cuda::heuristic; break; case 1: policy = cuda::spin; break; case 2: policy = cuda::yield; break; case 4: policy = cuda::block; break; default: // should not be able to get here exit(EXIT_FAILURE); } current_device.set_synch_scheduling_policy(policy); current_device.enable_mapping_host_memory(); // allocate host memory int c = 5; // value to which the array will be initialized // Allocate Host memory auto h_a = cuda::memory::host::make_unique<int[]>(n); // allocate device memory // pointers to data and init value in the device memory auto d_a = cuda::memory::device::make_unique<int[]>(cuda_device_id, n); auto d_c = cuda::memory::device::make_unique<int>(cuda_device_id); cuda::memory::copy_single(*d_c.get(), c); std::cout << "\nStarting Test\n"; // allocate and initialize an array of stream handles std::vector<cuda::stream_t<>> streams; std::generate_n( std::back_inserter(streams), nstreams, [&current_device]() { // Note: we could omit the specific requirement of synchronization // with the default stream, since that's the CUDA default - but I // think it's important to state that's the case return current_device.create_stream( cuda::stream::implicitly_synchronizes_with_default_stream); } ); // create CUDA event handles // use blocking sync auto use_blocking_sync = (device_sync_method == hipDeviceScheduleBlockingSync); auto start_event = cuda::event::create(current_device, use_blocking_sync); auto stop_event = cuda::event::create(current_device, use_blocking_sync); // time memcopy from device start_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed cuda::memory::async::copy(h_a.get(), d_a.get(), nbytes, streams[0].id()); stop_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed stop_event.synchronize(); // block until the event is actually recorded auto time_memcpy = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << "memcopy:\t" << time_memcpy << "\n"; // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[0].id(), d_a.get(), d_c.get(), niterations); stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto time_kernel = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << "kernel:\t\t" << time_kernel << "\n"; ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a.get(), d_c.get(), niterations); cuda::memory::copy(h_a.get(), d_a.get(), nbytes); } 
stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto elapsed_time = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << "non-streamed:\t" << elapsed_time / nreps << "\n"; ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(h_a.get(), 255, nbytes); // set host memory bits to all 1s, for testing correctness cuda::memory::device::zero(d_a.get(), nbytes); // set device memory to all 0s, for testing correctness start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[i].id(), d_a.get() + i *n / nstreams, d_c.get(), niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { cuda::memory::async::copy( h_a.get() + i * n / nstreams, d_a.get() + i * n / nstreams, nbytes / nstreams, streams[i].id()); } } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); elapsed_time = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << nstreams <<" streams:\t" << elapsed_time / nreps << "\n"; // check whether the output is correct std::cout << "-------------------------------\n"; bool bResults = correct_data(h_a.get(), n, c*nreps*niterations); std::cout << (bResults ? "SUCCESS" : "FAILURE") << "\n"; return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
9ff095ff57212a16f2764c51026a748d2319bb31.cu
/** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg <[email protected]> * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the deriving author. * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "cudaEventDefault", "cudaEventBlockingSync", "cudaEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "cudaDeviceScheduleAuto", "cudaDeviceScheduleSpin", "cudaDeviceScheduleYield", "INVALID", "cudaDeviceScheduleBlockingSync", NULL }; // System includes // CUDA runtime #include "cuda_runtime.h" // helper functions and utilities to work with CUDA #include "../helper_cuda.h" #include <cuda/api_wrappers.hpp> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif #include <cstdlib> #include <fstream> #include <vector> #include <iostream> #include <algorithm> // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { std::cout << i << ": " << a[i] << " " << c << "\n"; return false; } } return true; } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { std::cout << "Usage: " << sSDKsample << " [options below]\n" << "\t--sync_method=n for CPU/GPU synchronization\n" << "\t n=" << sSyncMethod[0] << "\n" << "\t n=" << sSyncMethod[1] << "\n" << "\t n=" << sSyncMethod[2] << "\n" << "\t <Default> n=" << sSyncMethod[4] << "\n" << "\t--use_generic_memory (default) use generic page-aligned for system memory\n" << "\t--use_cuda_malloc_host (optional) use cudaMallocHost to allocate system memory\n"; } int main(int argc, char **argv) { int cuda_device_id = 0; int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, 
blocks; // kernel launch configuration float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using cudaHostAlloc() int device_sync_method = cudaDeviceBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { std::cout << "Device synchronization method set to = " << sSyncMethod[device_sync_method] << "\n"; std::cout << "Setting reps to 100 to demonstrate steady state\n"; nreps = 100; } else { std::cout << "Invalid command line option sync_method=\"" << device_sync_method << "\"\n"; return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { std::cout << "To simplify this example, support for using cuda_malloc_host instead of " << "pinned memory has been dropped.\n"; return EXIT_FAILURE; } std::cout << "\n> "; cuda_device_id = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device auto num_devices = cuda::device::count(); if ( 0 == num_devices) { std::cerr << "your system does not have a CUDA capable device, waiving test...\n"; return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device_id >= num_devices) { std::cout << "cuda_device=" << cuda_device_id << " is invalid, " << "must choose device ID between 0 and " << num_devices-1 << "\n"; return EXIT_FAILURE; } cuda::device::current::set(cuda_device_id); auto current_device = cuda::device::current::get(); // Checking for compute capabilities auto properties = current_device.properties(); auto compute_capability = properties.compute_capability(); if (compute_capability < cuda::device::compute_capability_t({1, 1}) ) { std::cout << properties.name << " does not have Compute Capability 1.1 or newer. Reducing workload.\n"; } if (compute_capability.major >= 2) { niterations = 5; } else { if (compute_capability.minor > 1) { niterations = 5; } else { niterations = 1; // reduced workload for compute capability 1.0 and 1.1 } } // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false std::cout << "Device: <" << properties.name << "> canMapHostMemory: " << (properties.canMapHostMemory ? "Yes" : "No") << "\n"; if (not properties.can_map_host_memory()) { std::cout << "Cannot allocate pinned memory (and map GPU device memory to it); aborting.\n"; return EXIT_FAILURE; } // Anything that is less than 32 Cores will have scaled down workload auto faux_cores_per_sm = compute_capability.max_in_flight_threads_per_processor(); auto faux_cores_overall = properties.max_in_flight_threads_on_device(); scale_factor = max((32.0f / faux_cores_overall), 1.0f); n = (int)rint((float)n / scale_factor); std::cout << "> CUDA Capable: SM " << compute_capability.major << "." 
<< compute_capability.minor << " hardware\n"; std::cout << "> " << properties.multiProcessorCount << " Multiprocessor(s)" << " x " << faux_cores_per_sm << " (Cores/Multiprocessor) = " << faux_cores_overall << " (Cores)\n"; std::cout << "> scale_factor = " << 1.0f/scale_factor << "\n"; std::cout << "> array_size = " << n << "\n\n"; // enable use of blocking sync, to reduce CPU usage std::cout << "> Using CPU/GPU Device Synchronization method " << sDeviceSyncMethod[device_sync_method] << "\n"; cuda::host_thread_synch_scheduling_policy_t policy; switch(device_sync_method) { case 0: policy = cuda::heuristic; break; case 1: policy = cuda::spin; break; case 2: policy = cuda::yield; break; case 4: policy = cuda::block; break; default: // should not be able to get here exit(EXIT_FAILURE); } current_device.set_synch_scheduling_policy(policy); current_device.enable_mapping_host_memory(); // allocate host memory int c = 5; // value to which the array will be initialized // Allocate Host memory auto h_a = cuda::memory::host::make_unique<int[]>(n); // allocate device memory // pointers to data and init value in the device memory auto d_a = cuda::memory::device::make_unique<int[]>(cuda_device_id, n); auto d_c = cuda::memory::device::make_unique<int>(cuda_device_id); cuda::memory::copy_single(*d_c.get(), c); std::cout << "\nStarting Test\n"; // allocate and initialize an array of stream handles std::vector<cuda::stream_t<>> streams; std::generate_n( std::back_inserter(streams), nstreams, [&current_device]() { // Note: we could omit the specific requirement of synchronization // with the default stream, since that's the CUDA default - but I // think it's important to state that's the case return current_device.create_stream( cuda::stream::implicitly_synchronizes_with_default_stream); } ); // create CUDA event handles // use blocking sync auto use_blocking_sync = (device_sync_method == cudaDeviceBlockingSync); auto start_event = cuda::event::create(current_device, use_blocking_sync); auto stop_event = cuda::event::create(current_device, use_blocking_sync); // time memcopy from device start_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed cuda::memory::async::copy(h_a.get(), d_a.get(), nbytes, streams[0].id()); stop_event.record(cuda::stream::default_stream_id); // record in stream-0, to ensure that all previous CUDA calls have completed stop_event.synchronize(); // block until the event is actually recorded auto time_memcpy = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << "memcopy:\t" << time_memcpy << "\n"; // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); init_array<<<blocks, threads, 0, streams[0].id()>>>(d_a.get(), d_c.get(), niterations); stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto time_kernel = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << "kernel:\t\t" << time_kernel << "\n"; ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { init_array<<<blocks, threads>>>(d_a.get(), d_c.get(), niterations); cuda::memory::copy(h_a.get(), d_a.get(), nbytes); } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); auto elapsed_time = 
cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << "non-streamed:\t" << elapsed_time / nreps << "\n"; ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(h_a.get(), 255, nbytes); // set host memory bits to all 1s, for testing correctness cuda::memory::device::zero(d_a.get(), nbytes); // set device memory to all 0s, for testing correctness start_event.record(cuda::stream::default_stream_id); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { init_array<<<blocks, threads, 0, streams[i].id()>>>(d_a.get() + i *n / nstreams, d_c.get(), niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { cuda::memory::async::copy( h_a.get() + i * n / nstreams, d_a.get() + i * n / nstreams, nbytes / nstreams, streams[i].id()); } } stop_event.record(cuda::stream::default_stream_id); stop_event.synchronize(); elapsed_time = cuda::event::milliseconds_elapsed_between(start_event, stop_event); std::cout << nstreams <<" streams:\t" << elapsed_time / nreps << "\n"; // check whether the output is correct std::cout << "-------------------------------\n"; bool bResults = correct_data(h_a.get(), n, c*nreps*niterations); std::cout << (bResults ? "SUCCESS" : "FAILURE") << "\n"; return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
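// The two simpleStreams listings above use the cuda-api-wrappers library; as a point of comparison,
// this is a rough, hedged sketch of the same per-stream "kernel then async copy" pattern written
// directly against the CUDA runtime API. It reuses the init_array kernel defined in the sample,
// assumes n is divisible by nstreams, and omits timing and error checks.
#include <vector>
#include "cuda_runtime.h"

__global__ void init_array(int *g_data, int *factor, int num_iterations);  // defined in the sample above

void streamed_overlap_sketch(int n, int nstreams, int niterations, int *d_c)
{
    const size_t nbytes = (size_t)n * sizeof(int);
    int *h_a = nullptr, *d_a = nullptr;
    cudaMallocHost((void **)&h_a, nbytes);        // pinned host memory, required for true async copies
    cudaMalloc((void **)&d_a, nbytes);

    std::vector<cudaStream_t> streams(nstreams);
    for (auto &s : streams) cudaStreamCreate(&s);

    const dim3 threads(512, 1);
    const dim3 blocks(n / (nstreams * threads.x), 1);
    for (int i = 0; i < nstreams; ++i) {
        int *d_chunk = d_a + (size_t)i * n / nstreams;
        init_array<<<blocks, threads, 0, streams[i]>>>(d_chunk, d_c, niterations);
        cudaMemcpyAsync(h_a + (size_t)i * n / nstreams, d_chunk, nbytes / nstreams,
                        cudaMemcpyDeviceToHost, streams[i]);
    }
    cudaDeviceSynchronize();                      // wait for every stream before touching h_a

    for (auto &s : streams) cudaStreamDestroy(s);
    cudaFree(d_a);
    cudaFreeHost(h_a);
}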
d957e9d1909f43f4e00d251fbb11eb40cd6cb80c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "QuickJoin.h" __global__ void NestedLoop(Object *objs, int start, int end) { long long thread_id = (long long)blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; long long size = end - start + 1; if (thread_id >= size*size) { return; } int i = thread_id / size; int j = thread_id - i*size; if (i <= j) { return; } if (objs[start+i].Distance(objs[start+j]) <= eps) { atomicAdd(&resultCount, 1); /* atomic increment: many threads can match pairs concurrently */ //printf("%d <--> %d\n", start+i, start+j); } }
d957e9d1909f43f4e00d251fbb11eb40cd6cb80c.cu
#include "QuickJoin.h" __global__ void NestedLoop(Object *objs, int start, int end) { long long thread_id = (long long)blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; long long size = end - start + 1; if (thread_id >= size*size) { return; } int i = thread_id / size; int j = thread_id - i*size; if (i <= j) { return; } if (objs[start+i].Distance(objs[start+j]) <= eps) { ++resultCount; //printf("%d <--> %d\n", start+i, start+j); } }
3413650611d8bbe433d96827a9b0c2dbbc72f6b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv2/opencv.hpp> #include <vector> __global__ void dilate ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) { //auto i = blockIdx.x * (blockDim.x - 2) + threadIdx.x; //auto j = blockIdx.y * blockDim.y + threadIdx.y; auto i = blockIdx.x * (blockDim.x) + threadIdx.x; auto j = blockIdx.y * (blockDim.y) + threadIdx.y; if ( i > 0 && i < (cols - 1) && j > 0 && j < (rows - 1)) { for (auto c = 0; c < 3; ++c){ auto hu = umin(data[((j - 1) * cols + i - 1) * 3 + c], data[((j - 1) * cols + i + 1) * 3 + c]); hu = umin(hu, data[(( j - 1) * cols + i) * 3 + c]); auto mu = umin(data[( j * cols + i - 1) * 3 + c], data[( j * cols + i + 1) * 3 + c]); mu = umin(mu, data[( j * cols + i) * 3 + c]); auto bu = umin(data[((j + 1) * cols + i - 1) * 3 + c], data[((j + 1) * cols + i + 1) * 3 + c]); bu = umin(bu, data[((j + 1) * cols + i) * 3 + c]); auto res = umin (hu, mu); res = umin (res, bu); out[(j * cols + i) * 3 + c] = res; } } } int main() { cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED ); auto rgb = m_in.data; auto rows = m_in.rows; auto cols = m_in.cols; std::vector< unsigned char > g( 3 * rows * cols ); cv::Mat m_out( rows, cols, CV_8UC3, g.data() ); unsigned char * rgb_d; unsigned char * out; std::size_t size = 3 * m_in.cols * m_in.rows; // hipHostRegister(g.data(), size, hipHostRegisterDefault); hipMalloc( &rgb_d, 3 * rows * cols); hipMalloc( &out, 3 * rows * cols ); // Streams declaration. hipStream_t streams[ 2 ]; // Creation. hipStreamCreate( &streams[ 0 ] ); hipStreamCreate( &streams[ 1 ] ); hipMemcpyAsync( rgb_d, rgb, size/2, hipMemcpyHostToDevice, streams[ 0 ] ); hipMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, hipMemcpyHostToDevice, streams[ 1 ] ); dim3 t( 32, 32 ); dim3 be( 3 * (( cols ) / ((t.x - 2) + 1) ), (( rows ) / ((t.y - 2) + 1) )); // dim3 t( 16, 16 ); // dim3 be( 3 * 2 * (( cols ) / ((t.x - 2) + 1) ), (2 * ( rows ) / ((t.y - 2) + 1) )); // dim3 t( 4, 4 ); // dim3 be( 3 * 8 * (( cols ) / ((t.x - 2) + 1) ), (8 * ( rows ) / ((t.y - 2) + 1) )); hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start ); // One kernel is launched in each stream. hipLaunchKernelGGL(( dilate), dim3(be), dim3(t), 0, streams[ 0 ] , rgb_d, out, cols, rows / 2 + 2); hipLaunchKernelGGL(( dilate), dim3(be), dim3(t), 0, streams[ 1 ] , rgb_d+size/2, out+size/2, cols, rows / 2); // Sending back the resulting vector by halves. hipMemcpyAsync( g.data(), out, size/2, hipMemcpyDeviceToHost, streams[ 0 ] ); hipMemcpyAsync( g.data()+size/2, out+size/2, size/2, hipMemcpyDeviceToHost, streams[ 1 ] ); // Synchronize everything. hipDeviceSynchronize(); // Destroy streams. hipStreamDestroy(streams[0]); hipStreamDestroy(streams[1]); auto hipError_t = hipGetLastError(); // If no error was detected in that mess, we get hipSuccess if (hipError_t != hipSuccess){ std::cout << hipGetErrorName(hipError_t) << std::endl; std::cout << hipGetErrorString(hipError_t) << std::endl; } else { std::cout << "No error" << std::endl; } hipEventRecord( stop ); hipEventSynchronize( stop ); float duration = 0.0f; hipEventElapsedTime( &duration, start, stop ); std::cout << "Total: " << duration << "ms\n"; cv::imwrite( "outDilate.jpg", m_out ); hipFree( rgb_d); //hipFree( g_d); hipFree ( out); return 0; }
3413650611d8bbe433d96827a9b0c2dbbc72f6b0.cu
#include <opencv2/opencv.hpp> #include <vector> __global__ void dilate ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) { //auto i = blockIdx.x * (blockDim.x - 2) + threadIdx.x; //auto j = blockIdx.y * blockDim.y + threadIdx.y; auto i = blockIdx.x * (blockDim.x) + threadIdx.x; auto j = blockIdx.y * (blockDim.y) + threadIdx.y; if ( i > 0 && i < (cols - 1) && j > 0 && j < (rows - 1)) { for (auto c = 0; c < 3; ++c){ auto hu = umin(data[((j - 1) * cols + i - 1) * 3 + c], data[((j - 1) * cols + i + 1) * 3 + c]); hu = umin(hu, data[(( j - 1) * cols + i) * 3 + c]); auto mu = umin(data[( j * cols + i - 1) * 3 + c], data[( j * cols + i + 1) * 3 + c]); mu = umin(mu, data[( j * cols + i) * 3 + c]); auto bu = umin(data[((j + 1) * cols + i - 1) * 3 + c], data[((j + 1) * cols + i + 1) * 3 + c]); bu = umin(bu, data[((j + 1) * cols + i) * 3 + c]); auto res = umin (hu, mu); res = umin (res, bu); out[(j * cols + i) * 3 + c] = res; } } } int main() { cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED ); auto rgb = m_in.data; auto rows = m_in.rows; auto cols = m_in.cols; std::vector< unsigned char > g( 3 * rows * cols ); cv::Mat m_out( rows, cols, CV_8UC3, g.data() ); unsigned char * rgb_d; unsigned char * out; std::size_t size = 3 * m_in.cols * m_in.rows; // cudaHostRegister(g.data(), size, cudaHostRegisterDefault); cudaMalloc( &rgb_d, 3 * rows * cols); cudaMalloc( &out, 3 * rows * cols ); // Streams declaration. cudaStream_t streams[ 2 ]; // Creation. cudaStreamCreate( &streams[ 0 ] ); cudaStreamCreate( &streams[ 1 ] ); cudaMemcpyAsync( rgb_d, rgb, size/2, cudaMemcpyHostToDevice, streams[ 0 ] ); cudaMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, cudaMemcpyHostToDevice, streams[ 1 ] ); dim3 t( 32, 32 ); dim3 be( 3 * (( cols ) / ((t.x - 2) + 1) ), (( rows ) / ((t.y - 2) + 1) )); // dim3 t( 16, 16 ); // dim3 be( 3 * 2 * (( cols ) / ((t.x - 2) + 1) ), (2 * ( rows ) / ((t.y - 2) + 1) )); // dim3 t( 4, 4 ); // dim3 be( 3 * 8 * (( cols ) / ((t.x - 2) + 1) ), (8 * ( rows ) / ((t.y - 2) + 1) )); cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start ); // One kernel is launched in each stream. dilate<<< be, t, 0, streams[ 0 ] >>>( rgb_d, out, cols, rows / 2 + 2); dilate<<< be, t, 0, streams[ 1 ] >>>( rgb_d+size/2, out+size/2, cols, rows / 2); // Sending back the resulting vector by halves. cudaMemcpyAsync( g.data(), out, size/2, cudaMemcpyDeviceToHost, streams[ 0 ] ); cudaMemcpyAsync( g.data()+size/2, out+size/2, size/2, cudaMemcpyDeviceToHost, streams[ 1 ] ); // Synchronize everything. cudaDeviceSynchronize(); // Destroy streams. cudaStreamDestroy(streams[0]); cudaStreamDestroy(streams[1]); auto cudaError = cudaGetLastError(); // If no error was detected in that mess, we get cudaSuccess if (cudaError != cudaSuccess){ std::cout << cudaGetErrorName(cudaError) << std::endl; std::cout << cudaGetErrorString(cudaError) << std::endl; } else { std::cout << "No error" << std::endl; } cudaEventRecord( stop ); cudaEventSynchronize( stop ); float duration = 0.0f; cudaEventElapsedTime( &duration, start, stop ); std::cout << "Total: " << duration << "ms\n"; cv::imwrite( "outDilate.jpg", m_out ); cudaFree( rgb_d); //cudaFree( g_d); cudaFree ( out); return 0; }
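// Hedged alternative launch-configuration sketch (an assumption, not the original tuning): the dilate
// kernel above maps one thread to one pixel and iterates the three channels itself, so a plain ceiling
// division over the image dimensions is enough to cover every pixel; the kernel's own bounds test
// skips the one-pixel border and any overshoot. The helper name below is illustrative.
#include <cstddef>
#include "cuda_runtime.h"

static inline dim3 DilateGrid(std::size_t cols, std::size_t rows, dim3 block = dim3(32, 32))
{
    return dim3((unsigned)((cols + block.x - 1) / block.x),
                (unsigned)((rows + block.y - 1) / block.y));
}

// Possible single-stream use with the pointers from main() above:
//   dilate<<<DilateGrid(cols, rows), dim3(32, 32)>>>(rgb_d, out, cols, rows);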
a19498cdf91ed0d99aa1a3fb320b722801ef3242.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + 
(unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); 
tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long 
**)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int 
argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
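// Two hedged notes on the benchmark above (assumptions flagged): main() expects five command-line
// arguments -- num_blocks, num_threads_per_block, num_iterations, divergence (threads active per warp)
// and stride -- even though usage() lists fewer. The CPU program below is an illustrative analogue of
// the same pointer-chasing idea, not part of the original benchmark: because every load depends on the
// previous one, the chain cannot be overlapped, so elapsed time divided by the hop count approximates
// per-access latency of whatever memory level the buffer occupies.
#include <cstdint>
#include <cstdio>
#include <vector>
#include <chrono>

int main()
{
    const size_t N = 1 << 20;                              // about 8 MB of pointers on a 64-bit host
    std::vector<uintptr_t> chain(N);
    for (size_t i = 0; i < N; ++i)
        chain[i] = (uintptr_t)&chain[(i + 64) % N];        // fixed stride, mirroring the GPU setup

    const size_t hops = 10000000;
    volatile uintptr_t *p = (volatile uintptr_t *)chain[0];
    auto t0 = std::chrono::steady_clock::now();
    for (size_t i = 0; i < hops; ++i)
        p = (volatile uintptr_t *)*p;                      // dependent load: the next address comes from memory
    auto t1 = std::chrono::steady_clock::now();

    const double ns_per_hop = std::chrono::duration<double, std::nano>(t1 - t0).count() / hops;
    printf("%p  ~%.2f ns per dependent load\n", (void *)p, ns_per_hop);
    return 0;
}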
a19498cdf91ed0d99aa1a3fb320b722801ef3242.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = 
f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 
+ (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long 
*)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); 
num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
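// ---------------------------------------------------------------------------
// A minimal, self-contained sketch of the pointer-chasing idea the benchmark
// above relies on, assuming clock64() timing and a simple cyclic chain; none of
// the identifiers below (chase_kernel, CHAIN_LEN, STRIDE, etc.) come from the
// original file.  The host writes into element i the device address of element
// (i + STRIDE) % CHAIN_LEN, so every load in the kernel depends on the previous
// one and cannot be overlapped; the elapsed clock64() ticks divided by the hop
// count then approximate the per-load latency.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHAIN_LEN 4096
#define STRIDE 16

__global__ void chase_kernel(unsigned long long *chain, int hops,
                             unsigned long long *ticks, unsigned long long *sink)
{
    unsigned long long *p = chain;           // start of the cyclic chain
    unsigned long long t0 = clock64();
    for (int i = 0; i < hops; i++) {
        p = (unsigned long long *)(*p);      // serialized dependent load
    }
    unsigned long long t1 = clock64();
    *ticks = t1 - t0;
    *sink = (unsigned long long)p;           // keep the chase from being optimized away
}

int main()
{
    unsigned long long *h_chain, *d_chain, *d_ticks, *d_sink;
    h_chain = (unsigned long long *)malloc(CHAIN_LEN * sizeof(unsigned long long));
    cudaMalloc((void **)&d_chain, CHAIN_LEN * sizeof(unsigned long long));
    cudaMalloc((void **)&d_ticks, sizeof(unsigned long long));
    cudaMalloc((void **)&d_sink, sizeof(unsigned long long));
    /* element i holds the device address of element (i + STRIDE) % CHAIN_LEN */
    for (int i = 0; i < CHAIN_LEN; i++) {
        h_chain[i] = (unsigned long long)(d_chain + ((i + STRIDE) % CHAIN_LEN));
    }
    cudaMemcpy(d_chain, h_chain, CHAIN_LEN * sizeof(unsigned long long), cudaMemcpyHostToDevice);
    int hops = 1 << 20;
    chase_kernel<<<1, 1>>>(d_chain, hops, d_ticks, d_sink);
    cudaDeviceSynchronize();
    unsigned long long ticks = 0;
    cudaMemcpy(&ticks, d_ticks, sizeof(ticks), cudaMemcpyDeviceToHost);
    printf("~%.2f cycles per dependent load\n", (double)ticks / hops);
    cudaFree(d_chain); cudaFree(d_ticks); cudaFree(d_sink); free(h_chain);
    return 0;
}
// ---------------------------------------------------------------------------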
af1d9f9a7951c850d326265d5c267d9f02f8f31e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/layer_norm_op.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T> __global__ void ComputeStdDevAndFusedParamsCUDAKernel( const int N, const T epsilon, const T* mean, const T* var, T* stddev, T* scale, T* bias); template <> __global__ void ComputeStdDevAndFusedParamsCUDAKernel<float>( const int N, const float epsilon, const float* mean, const float* var, float* stddev, float* scale, float* bias) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 const float rstd = rsqrtf(__ldg(var + i) + epsilon); stddev[i] = rstd * (__ldg(var + i) + epsilon); scale[i] = rstd; bias[i] = -rstd * __ldg(mean + i); #else const float rstd = rsqrtf(var[i] + epsilon); stddev[i] = rstd * (var[i] + epsilon); scale[i] = rstd; bias[i] = -rstd * mean[i]; #endif } } template <typename T> __global__ void LayerNormForwardCUDAKernel( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float scale_val = __ldg(scale + i); const float bias_val = __ldg(bias + i); #else const float scale_val = scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 Y[index] = __ldg(X + index) * scale_val + bias_val; #else Y[index] = X[index] * scale_val + bias_val; #endif } } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < M; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(dY + index) * __ldg(X + index); db_val += __ldg(dY + index); #else ds_val += dY[index] * X[index]; db_val += dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Sum(ds_val); db_val = BlockReduce<T>(db_storage).Sum(db_val); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } template <typename T> __global__ void ComputeFusedParamsCUDAKernel( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { const T scale = T(1) / static_cast<T>(N); CUDA_1D_KERNEL_LOOP(i, M) { #if __CUDA_ARCH__ >= 350 const T rsig = T(1) / __ldg(sig + i); const T X_scale_val = (__ldg(db + i) * __ldg(mean + i) - __ldg(ds + i)) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * __ldg(mean + i) - __ldg(db + i) * rsig * scale; #else const T rsig = T(1) / sig[i]; const T X_scale_val = (db[i] * mean[i] - ds[i]) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * mean[i] - db[i] * rsig * scale; #endif } } template <typename T> __global__ void LayerNormBackwardCUDAKenrel( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float dY_scale_val = __ldg(dY_scale + i); const 
float X_scale_val = __ldg(X_scale + i); const float bias_val = __ldg(bias + i); #else const float dY_scale_val = dY_scale[i]; const float X_scale_val = X_scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 dX[index] = __ldg(dY + index) * dY_scale_val + __ldg(X + index) * X_scale_val + bias_val; #else dX[index] = dY[index] * dY_scale_val + X[index] * X_scale_val + bias_val; #endif } } } } // namespace template <> template <typename T> void LayerNormOp<CUDAContext>::ComputeStdDevAndFusedParams( const int N, const T* mean, const T* var, T* stddev, T* scale, T* bias, float epsilon, CUDAContext* context) { hipLaunchKernelGGL(( ComputeStdDevAndFusedParamsCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, static_cast<T>(epsilon), mean, var, stddev, scale, bias); } template <> template <typename T> void LayerNormOp<CUDAContext>::LayerNormForward( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y, CUDAContext* context) { hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>) , dim3(::min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), M, N, X, scale, bias, Y); } REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>); template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeInternalGradients( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>) , dim3(::min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, dY, X, ds, db); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeFusedParams( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(M)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, mean, sig, ds, db, dY_scale, X_scale, bias); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::LayerNormBackward( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { hipLaunchKernelGGL(( LayerNormBackwardCUDAKenrel<T>) , dim3(::min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, dY_scale, dY, X_scale, X, bias, dX); } REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>); } // namespace caffe2
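// ---------------------------------------------------------------------------
// The .hip file above is the hipify output of the .cu file that follows below;
// comparing the two, the visible changes are the hip_runtime/hipcub includes,
// std::min -> ::min, and the rewrite of each triple-chevron launch into
// hipLaunchKernelGGL.  The toy example below (scale_kernel and everything else
// in it is illustrative, not taken from either file) shows the two equivalent
// launch forms for one trivial kernel.
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void scale_kernel(const float *x, float a, float *y, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = a * x[i];
    }
}

int main()
{
    const int n = 1024;
    float *x = NULL;
    float *y = NULL;
    cudaMallocManaged((void **)&x, n * sizeof(float));
    cudaMallocManaged((void **)&y, n * sizeof(float));
    for (int i = 0; i < n; i++) {
        x[i] = (float)i;
    }
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form, as written in the .cu file below:
    scale_kernel<<<grid, block, 0, 0>>>(x, 2.0f, y, n);
    // HIP form produced by hipify, as seen in the .hip file above; after
    // translation it would replace the launch line above:
    //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, x, 2.0f, y, n);
    cudaDeviceSynchronize();
    printf("y[10] = %f\n", y[10]);
    cudaFree(x);
    cudaFree(y);
    return 0;
}
// ---------------------------------------------------------------------------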
af1d9f9a7951c850d326265d5c267d9f02f8f31e.cu
#include "caffe2/operators/layer_norm_op.h" #include <cub/cub.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T> __global__ void ComputeStdDevAndFusedParamsCUDAKernel( const int N, const T epsilon, const T* mean, const T* var, T* stddev, T* scale, T* bias); template <> __global__ void ComputeStdDevAndFusedParamsCUDAKernel<float>( const int N, const float epsilon, const float* mean, const float* var, float* stddev, float* scale, float* bias) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 const float rstd = rsqrtf(__ldg(var + i) + epsilon); stddev[i] = rstd * (__ldg(var + i) + epsilon); scale[i] = rstd; bias[i] = -rstd * __ldg(mean + i); #else const float rstd = rsqrtf(var[i] + epsilon); stddev[i] = rstd * (var[i] + epsilon); scale[i] = rstd; bias[i] = -rstd * mean[i]; #endif } } template <typename T> __global__ void LayerNormForwardCUDAKernel( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float scale_val = __ldg(scale + i); const float bias_val = __ldg(bias + i); #else const float scale_val = scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 Y[index] = __ldg(X + index) * scale_val + bias_val; #else Y[index] = X[index] * scale_val + bias_val; #endif } } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < M; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(dY + index) * __ldg(X + index); db_val += __ldg(dY + index); #else ds_val += dY[index] * X[index]; db_val += dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Sum(ds_val); db_val = BlockReduce<T>(db_storage).Sum(db_val); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } template <typename T> __global__ void ComputeFusedParamsCUDAKernel( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { const T scale = T(1) / static_cast<T>(N); CUDA_1D_KERNEL_LOOP(i, M) { #if __CUDA_ARCH__ >= 350 const T rsig = T(1) / __ldg(sig + i); const T X_scale_val = (__ldg(db + i) * __ldg(mean + i) - __ldg(ds + i)) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * __ldg(mean + i) - __ldg(db + i) * rsig * scale; #else const T rsig = T(1) / sig[i]; const T X_scale_val = (db[i] * mean[i] - ds[i]) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * mean[i] - db[i] * rsig * scale; #endif } } template <typename T> __global__ void LayerNormBackwardCUDAKenrel( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float dY_scale_val = __ldg(dY_scale + i); const float X_scale_val = __ldg(X_scale + i); const float bias_val = __ldg(bias + i); #else const float 
dY_scale_val = dY_scale[i]; const float X_scale_val = X_scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 dX[index] = __ldg(dY + index) * dY_scale_val + __ldg(X + index) * X_scale_val + bias_val; #else dX[index] = dY[index] * dY_scale_val + X[index] * X_scale_val + bias_val; #endif } } } } // namespace template <> template <typename T> void LayerNormOp<CUDAContext>::ComputeStdDevAndFusedParams( const int N, const T* mean, const T* var, T* stddev, T* scale, T* bias, float epsilon, CUDAContext* context) { ComputeStdDevAndFusedParamsCUDAKernel<T> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( N, static_cast<T>(epsilon), mean, var, stddev, scale, bias); } template <> template <typename T> void LayerNormOp<CUDAContext>::LayerNormForward( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y, CUDAContext* context) { LayerNormForwardCUDAKernel<T> <<<std::min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(M, N, X, scale, bias, Y); } REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>); template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeInternalGradients( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { ComputeInternalGradientsCUDAKernel<T> <<<std::min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(M, N, dY, X, ds, db); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeFusedParams( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { ComputeFusedParamsCUDAKernel<T> <<<CAFFE_GET_BLOCKS(M), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( M, N, mean, sig, ds, db, dY_scale, X_scale, bias); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::LayerNormBackward( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { LayerNormBackwardCUDAKenrel<T> <<<std::min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(M, N, dY_scale, dY, X_scale, X, bias, dX); } REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>); } // namespace caffe2
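// ---------------------------------------------------------------------------
// A scalar CPU restatement of the backward math implemented above by
// ComputeInternalGradientsCUDAKernel, ComputeFusedParamsCUDAKernel and
// LayerNormBackwardCUDAKenrel, which may be useful as a reference when
// sanity-checking the kernels; the function name layer_norm_backward_ref is
// illustrative and not part of the original operator.  Per row i of an M x N
// input:
//   ds_i       = sum_j dY[i][j] * X[i][j]
//   db_i       = sum_j dY[i][j]
//   dY_scale_i = 1 / sig_i
//   X_scale_i  = (db_i * mean_i - ds_i) / (N * sig_i^3)
//   bias_i     = -X_scale_i * mean_i - db_i / (N * sig_i)
//   dX[i][j]   = dY[i][j] * dY_scale_i + X[i][j] * X_scale_i + bias_i
void layer_norm_backward_ref(int M, int N, const float *dY, const float *X,
                             const float *mean, const float *sig, float *dX)
{
    for (int i = 0; i < M; ++i) {
        float ds = 0.0f;
        float db = 0.0f;
        for (int j = 0; j < N; ++j) {
            ds += dY[i * N + j] * X[i * N + j];
            db += dY[i * N + j];
        }
        const float rsig = 1.0f / sig[i];
        const float dY_scale = rsig;
        const float X_scale = (db * mean[i] - ds) * rsig * rsig * rsig / (float)N;
        const float bias = -X_scale * mean[i] - db * rsig / (float)N;
        for (int j = 0; j < N; ++j) {
            dX[i * N + j] = dY[i * N + j] * dY_scale + X[i * N + j] * X_scale + bias;
        }
    }
}
// ---------------------------------------------------------------------------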
261726d068a2ebe50cebb611ece826597c6166d9.hip
// !!! This is a file automatically generated by hipify!!! #include "func_cuda.h" #include <pthread.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include <stdlib.h> #include "alg_bgmodel.h" #define POINT_PER_THREAD 15 #define EDGE_WIDTH 6 //shared with other files TVehLprLcRect atCudaRoughRectsOut[MAX_LOCNUM]; l32 l32CudaRectCountOut; u8 *pu8CudaInputSrc; u8 *pu8CudaRGBOut; u8 *pu8CudaZoomOut; u8 *pu8CudaGrayOut; u8 *pu8CudaFrameCurOut; u8 *pu8CudaFGFrameOut; sem_t sem_empty, sem_full, sem_ready, sem_finish; //only used in this file u8 *pu8CudaImgBuf; u8 *pu8CudaRGBBuf; u8 *pu8CudaZoomBuf; u8 *pu8CudaGrayBuf; u8 *pu8CudaFrameCurBuf; u8 *pu8CudaFGFrameBuf; void *pvCudaBgModel; struct timeval tstart[20]; int t_cnt = -1; //thread param pthread_t tid; struct _cuda_thread_arg { l32 l32Width; l32 l32Height; l32 l32Stride; }thread_arg; //inner function param typedef struct _LCMAX_POS_ { l32 l32PosX; l32 l32PosY; l32 l32Val; }LCMAX_POS; struct stLocLcMax { u8 *pu8EdgeDensMap; u8 *pu8EdgeDensMapOrg; u8 *pu8EdgeDensMap2; u8 *pu8EdgeDensMapMoph; u8 *pu8Temp; u8 *tRoiRect; } ptLocLcMax0; struct stImage { u8 *pu8Y; u8 *pu8U; u8 *pu8V; l32 l32Width; l32 l32Height; } ptImage0; struct stLocInput { u8 *pu8SrcImg; l32 l32Width; l32 l32Height; l32 l32Stride; } ptLocInput0; struct stLocOutput { TVehLprLcRect *ptRect; l32 l32RectCount; } ptLocOutput0; //cuda thread function void *thread_func_cuda(void *arg); void init_cuda(l32 l32OrgWidth, l32 l32OrgHeight, l32 l32FrameWidth, l32 l32FrameHeight) { l32 bufLength = (l32OrgWidth * l32OrgHeight * 3) >> 1; //init semaphore sem_init(&sem_empty, 0, 1); sem_init(&sem_full, 0, 0); sem_init(&sem_ready, 0, 0); sem_init(&sem_finish, 0, 1); //allocate cuda buffer checkCudaErrors(hipMalloc(&pu8CudaImgBuf, sizeof(u8) * bufLength)); checkCudaErrors(hipMallocManaged(&pu8CudaRGBBuf, sizeof(u8) * bufLength * 2)); checkCudaErrors(hipMallocManaged(&pu8CudaZoomBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); checkCudaErrors(hipMallocManaged(&pu8CudaGrayBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); checkCudaErrors(hipMallocManaged(&pu8CudaFrameCurBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); checkCudaErrors(hipMallocManaged(&pu8CudaFGFrameBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); //allocate cuda output buffer pu8CudaRGBOut = (u8 *)malloc(sizeof(u8) * bufLength * 2); pu8CudaZoomOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); pu8CudaGrayOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); pu8CudaFrameCurOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); pu8CudaFGFrameOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); if (pu8CudaRGBOut == NULL || pu8CudaZoomOut == NULL || pu8CudaGrayOut == NULL || pu8CudaFrameCurOut == NULL || pu8CudaFGFrameOut == NULL) { printf("cuda init malloc error\n"); exit(1); } //allocate rough locate buffer checkCudaErrors(hipMallocManaged(&ptLocLcMax0.pu8EdgeDensMapOrg, sizeof(u8) * bufLength)); checkCudaErrors(hipMallocManaged(&ptLocLcMax0.pu8EdgeDensMap, sizeof(u8) * bufLength)); checkCudaErrors(hipMallocManaged(&ptLocLcMax0.pu8EdgeDensMap2, sizeof(u8) * bufLength)); checkCudaErrors(hipMallocManaged(&ptLocLcMax0.pu8EdgeDensMapMoph, sizeof(u8) * bufLength)); checkCudaErrors(hipMallocManaged(&ptLocLcMax0.pu8Temp, sizeof(u8) * bufLength)); //open bgm if (BGMGuassMogOpen((void **)&pvCudaBgModel, l32FrameWidth, l32FrameHeight) != EStatus_Success) { printf("BGM Open error"); exit(1); } //create cuda thread thread_arg.l32Width = l32OrgWidth; thread_arg.l32Stride = l32OrgWidth; thread_arg.l32Height = l32OrgHeight; int error 
= pthread_create(&tid, NULL, thread_func_cuda, &thread_arg); if (error != 0) { printf("create thread error: %d\n", error); } } void uninit_cuda() { //let cuda thread exit sem_wait(&sem_empty); pu8CudaInputSrc = NULL; sem_post(&sem_full); pthread_join(tid, NULL); //free cuda buffer checkCudaErrors(hipFree(pu8CudaImgBuf)); checkCudaErrors(hipFree(pu8CudaRGBBuf)); checkCudaErrors(hipFree(pu8CudaZoomBuf)); checkCudaErrors(hipFree(pu8CudaGrayBuf)); checkCudaErrors(hipFree(pu8CudaFrameCurBuf)); checkCudaErrors(hipFree(pu8CudaFGFrameBuf)); //free cuda output buffer free(pu8CudaRGBOut); free(pu8CudaZoomOut); free(pu8CudaGrayOut); free(pu8CudaFrameCurOut); free(pu8CudaFGFrameOut); //free buffer for rough locate checkCudaErrors(hipFree(ptLocLcMax0.pu8EdgeDensMapOrg)); checkCudaErrors(hipFree(ptLocLcMax0.pu8EdgeDensMap)); checkCudaErrors(hipFree(ptLocLcMax0.pu8EdgeDensMap2)); checkCudaErrors(hipFree(ptLocLcMax0.pu8EdgeDensMapMoph)); checkCudaErrors(hipFree(ptLocLcMax0.pu8Temp)); //destroy semaphore sem_destroy(&sem_empty); sem_destroy(&sem_full); sem_destroy(&sem_ready); sem_destroy(&sem_finish); //close bgm BGMGuassMogClose(pvCudaBgModel); } void check(unsigned int result, char const *const func, const char *const file, int const line) { if (result) { fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n", file, line, result, func); // Make sure we call CUDA Device Reset before exiting exit(result); } } void startTimer() { if (t_cnt < 20){ gettimeofday(&tstart[++t_cnt], NULL); } } void dispTimer(char f_name[]) { struct timeval tend; gettimeofday(&tend, NULL); long time_used = (tend.tv_sec-tstart[t_cnt].tv_sec)*1000000+tend.tv_usec-tstart[t_cnt].tv_usec; printf("%s time: %.3lf ms\n", f_name, time_used / 1000.0); t_cnt--; } class myTimer { private: struct timeval tstart, tend; long time_used; public: void Timer() { start(); } void start() { gettimeofday(&tstart, NULL); } void disp(char *str) { gettimeofday(&tend, NULL); time_used = (tend.tv_sec - tstart.tv_sec) * 1000000 + tend.tv_usec - tstart.tv_usec; printf("%s time: %.3lf ms\n", str, time_used / 1000.0); } void disp() { gettimeofday(&tend, NULL); time_used = (tend.tv_sec - tstart.tv_sec) * 1000000 + tend.tv_usec - tstart.tv_usec; printf("total time: %.3lf ms\n", time_used / 1000.0); } }; __global__ void kerUpdatePixelBackgroundGMM2( TGuass2Value* ptGuassValueSrc, u8 *pu8PixelValueSrc, u8 *pu8GuassUsedSrc, u8 *pu8ForgroundPixelSrc, f32 fLearnRate, f32 fForegroundThreshod, f32 fDeviationThreshold, f32 fTb, f32 fCT, l32 bShadowDetection, f32 fShadowThreshold, l32 l32width, l32 l32height, l32 l32stride ) { u32 l32ImgIndexX, l32ImgIndexY,l32ImgIndex; l32 bMatched = 0; l32 l32Index, l32Local; l32 bBackGroundPixel = 0; TGuass2Value* ptGuassComponent; f32 fTotalWeight = 0; f32 fPrune = -fLearnRate * fCT; u8 u8PixelValue, *pu8ForgroundPixel, *pu8GuassUsed; TGuass2Value* ptGuassValue; l32 bShadowRe; l32ImgIndexX = blockIdx.x*blockDim.x + threadIdx.x; l32ImgIndexY = blockIdx.y*blockDim.y + threadIdx.y; if ((l32ImgIndexX < l32width) && (l32ImgIndexY < l32height)){ l32ImgIndex = l32ImgIndexY*l32stride + l32ImgIndexX; ptGuassValue = ptGuassValueSrc + BGFG_MOG2_MAX_GUASSNUM*l32ImgIndex; ptGuassComponent = ptGuassValue; u8PixelValue = pu8PixelValueSrc[l32ImgIndex]; pu8ForgroundPixel = pu8ForgroundPixelSrc + l32ImgIndex; pu8GuassUsed = pu8GuassUsedSrc + l32ImgIndex; for (l32Index = 0; l32Index < (*pu8GuassUsed); l32Index++, ptGuassComponent++) { f32 fWeight = ptGuassComponent->fWeight; f32 fAlpha; fWeight = (1 - fLearnRate) * fWeight + fPrune; if (!bMatched) { 
f32 fDif; fDif = (ptGuassComponent->fmean - u8PixelValue) * (ptGuassComponent->fmean - u8PixelValue); if (fTotalWeight < fForegroundThreshod && fDif < fTb * ptGuassComponent->fVar) { bBackGroundPixel = 1; } if (fDif < fDeviationThreshold * ptGuassComponent->fVar) { bMatched = 1; fWeight = fWeight + fLearnRate; fAlpha = fLearnRate / fWeight; ptGuassComponent->fmean = (1 - fAlpha) * ptGuassComponent->fmean + fAlpha * u8PixelValue; ptGuassComponent->fVar = MIN(BGFG_MOG2_VAR_MAX, MAX(BGFG_MOG2_VAR_MIN, (1 - fAlpha) * ptGuassComponent->fVar + fAlpha * fDif)); for (l32Local = l32Index; l32Local > 0; l32Local--) { if (fWeight < (ptGuassValue[l32Local - 1].fWeight)) { break; } else { TGuass2Value tTempGuass = ptGuassValue[l32Local]; ptGuassValue[l32Local] = ptGuassValue[l32Local - 1]; ptGuassValue[l32Local - 1] = tTempGuass; ptGuassComponent--; } } } } if (fWeight < -fPrune) { fWeight = 0.0; (*pu8GuassUsed)--; } ptGuassComponent->fWeight = fWeight; fTotalWeight += fWeight; } ptGuassComponent = ptGuassValue; for (l32Index = 0; l32Index < (*pu8GuassUsed); l32Index++, ptGuassComponent++) { ptGuassComponent->fWeight /= fTotalWeight; } if (!bMatched) { if (BGFG_MOG2_MAX_GUASSNUM == (*pu8GuassUsed)) { ptGuassComponent = ptGuassValue + BGFG_MOG2_MAX_GUASSNUM - 1; } else { ptGuassComponent = ptGuassValue + (*pu8GuassUsed); (*pu8GuassUsed)++; } if ((*pu8GuassUsed) == 1) { ptGuassComponent->fWeight = 1; } else { ptGuassComponent->fWeight = fLearnRate; for (l32Index = 0; l32Index < (*pu8GuassUsed) - 1; l32Index++) { ptGuassValue[l32Index].fWeight = ptGuassValue[l32Index].fWeight * (1 - fLearnRate); } } ptGuassComponent->fmean = u8PixelValue; ptGuassComponent->fVar = BGFG_MOG2_VAR_INIT; for (l32Local = (*pu8GuassUsed) - 1; l32Local > 0; l32Local--) { if (fLearnRate < (ptGuassValue[l32Local - 1].fWeight)) { break; } else { TGuass2Value tTempGuass = ptGuassValue[l32Local]; ptGuassValue[l32Local] = ptGuassValue[l32Local - 1]; ptGuassValue[l32Local - 1] = tTempGuass; ptGuassComponent--; } } } if (bBackGroundPixel) { *pu8ForgroundPixel = 0; } else { if (bShadowDetection) { #if 0 f32 fWeight = 0; f32 fnumerator, fdenominator; l32 l32IModes, l32IndexD; TGuass2Value tGass2value; f32 fRate; f32 fDist2Rate; f32 fDistD; // check all the components marked as background: for (l32IModes = 0; l32IModes < *pu8GuassUsed; l32IModes++) { tGass2value = ptGuassValue[l32IModes]; fnumerator = 0.0f; fdenominator = 0.0f; fnumerator += u8PixelValue * tGass2value.fmean; fdenominator += tGass2value.fmean * tGass2value.fmean; // no division by zero allowed if (fdenominator == 0) { bShadowRe = 0; } fRate = fnumerator / fdenominator; // if tau < a < 1 then also check the color distortion if ((fRate <= 1) && (fRate >= fShadowThreshold)) { fDist2Rate = 0.0f; fDistD = fRate * tGass2value.fmean - u8PixelValue; fDist2Rate += (fDistD * fDistD); if (fDist2Rate < fTb * tGass2value.fVar * fRate * fRate) { bShadowRe = 1; } } fWeight += tGass2value.fWeight; if (fWeight > fForegroundThreshod) { bShadowRe = 0; } } bShadowRe = 0; #endif } else { bShadowRe = 0; } if (bShadowRe == 1) { *pu8ForgroundPixel = 0; } else { *pu8ForgroundPixel = 255; } } } } l32 BGMGuassMogProcess_cuda(void *pvModelParam, u8 *pu8CurrentImage, u8 *puForegroundImage) { l32 l32Row, l32Col, l32Temp, l32I; l32 bBackground, l32Fg, l32Diff; TGuass2Model* ptGuassModel; TGuass2Value* ptGuassValue; u8* pu8GuassUsed; u8* pu8CurrentPixel; u8* pu8ForgroundPixel; l32 bShadowRe; if (pvModelParam == NULL || pu8CurrentImage == NULL) { printf("input param pu8CurrentImage == NULL\n"); return 
ERR_VEH_DETECT_PROCESS_INPUT_PARAM; } ptGuassModel = (TGuass2Model*)pvModelParam; ptGuassValue = &ptGuassModel->ptGuassValue[0]; pu8GuassUsed = &ptGuassModel->puUsedGuassNum[0]; pu8CurrentPixel = &pu8CurrentImage[0]; pu8ForgroundPixel = &ptGuassModel->pu8ForgroundImage[0]; ptGuassModel->l32Count++; l32Temp = MIN(2 * ptGuassModel->l32Count, BGFG_MOG2_HISTORY); ptGuassModel->fLearnRate = 1 / (f32)l32Temp; // BGGuass2EstimateLum(ptGuassModel, pu8CurrentPixel); // for (l32I = 0; l32I < ptGuassModel->l32Size; l32I++) { l32Fg = pu8CurrentPixel[l32I]; if (l32Fg == 0) { //roi puForegroundImage[l32I] = 0; } else { bBackground = ptGuassModel->pu8BackgroundImage[l32I] + ptGuassModel->l32BgLumAdj; l32Diff = (l32Fg > bBackground) ? (l32Fg - bBackground) : (bBackground - l32Fg); if (l32Diff > 20) { puForegroundImage[l32I] = 255; if ((l32Fg < bBackground) && (l32Fg >(bBackground * 50 / 100))) { puForegroundImage[l32I] = 128; } } else { puForegroundImage[l32I] = 0; } } } dim3 dim3Block_rect((ptGuassModel->l32ImageWidth+31) / 32, (ptGuassModel->l32ImageHeight+7) / 8, 1); dim3 dim3threads_rect(32, 8, 1); kerUpdatePixelBackgroundGMM2<< <dim3Block_rect, dim3threads_rect >> >( ptGuassModel->ptGuassValue, pu8CurrentImage, ptGuassModel->puUsedGuassNum, ptGuassModel->pu8ForgroundImage, ptGuassModel->fLearnRate, ptGuassModel->fForegroundThreshod, ptGuassModel->fDeviationThreshold, ptGuassModel->fTb, ptGuassModel->fCT, ptGuassModel->bShadowDetection, ptGuassModel->fShadowThreshold, ptGuassModel->l32ImageWidth, ptGuassModel->l32ImageHeight, ptGuassModel->l32ImageWidth ); checkCudaErrors(hipDeviceSynchronize()); //memcpy(puForegroundImage , ptGuassModel->pu8ForgroundImage, ptGuassModel->l32ImageWidth * ptGuassModel->l32ImageHeight); BGMGuassMogGetBGImage(ptGuassModel); return EStatus_Success; } __global__ void BilinearZoom_CheckBoundary_Kernel(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32XStride, u32 u32XPositionInit, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride, u32 u32YStride, u32 u32YPositionInit) { u32 u32YSrc, u32RowIndex, u32LineIndex, u32WY2, u32WY1; u32 u32XPosition, u32XSrc, u32WX2, u32WX1, u32Vn0, u32Vn1; u8 *pu8SrcLine1; u8 *pu8SrcLine2; u8 *pu8Dst; u32LineIndex = (blockIdx.x) * blockDim.x + threadIdx.x; u32RowIndex = (blockIdx.y) * blockDim.y + threadIdx.y; if(u32LineIndex<u32DstWidth && u32RowIndex<u32DstHeight){ u32 u32YPosition = u32YPositionInit + (u32RowIndex)*u32YStride; u32YSrc = u32YPosition >> 16; u32WY2 = (u32YPosition << 16) >> 29; u32WY1 = 8 - u32WY2; u32WY2 *= 1024; u32WY1 *= 1024; pu8SrcLine1 = pu8Image1 + u32YSrc * u32SrcStride; pu8SrcLine2 = u32WY2 == 0 ? 
pu8SrcLine1 : pu8SrcLine1 + u32SrcStride; pu8Dst = pu8Image2 + u32RowIndex * u32DstStride + u32LineIndex; u32XPosition = u32XPositionInit + (u32LineIndex)*u32XStride; u32XSrc = u32XPosition >> 16; u32WX2 = (u32XPosition << 16) >> 29; u32WX1 = 8 - u32WX2; u32Vn0 = (pu8SrcLine1[u32XSrc] * u32WX1 + pu8SrcLine1[u32XSrc + 1] * u32WX2); u32Vn1 = (pu8SrcLine2[u32XSrc] * u32WX1 + pu8SrcLine2[u32XSrc + 1] * u32WX2); *pu8Dst = (u8)((u32Vn0 * u32WY1 + u32Vn1 * u32WY2 + 0x8000) >> 16); } } void BilinearZoom_c_cuda(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride) { u32 u32XStride, u32YStride; u32 u32YPositionInit = 0; u32 u32XPositionInit = 0; u32XStride = ((u32SrcWidth - 1) << 16) / (u32DstWidth - 1);//pow(2,16)*Src_width/Dst_width u32YStride = ((u32SrcHeight - 1) << 16) / (u32DstHeight - 1);//pow(2,16)*Src_height/Dst_height if (0 == ((u32XStride << 16) >> 27)) { u32XStride = ((u32SrcWidth - 2) << 16) / (u32DstWidth - 1);//pow(2,16)*Src_width/Dst_width u32XPositionInit = 5 << 15; } if ((u32SrcHeight != u32DstHeight) && (0 == ((u32YStride << 16) >> 27))) { u32YStride = ((u32SrcHeight - 2) << 16) / (u32DstHeight - 1);//pow(2,16)*Src_height/Dst_height u32YPositionInit = 1 << 15; } dim3 dimGrid((u32DstWidth+31) / 32, (u32DstHeight+7)/8); dim3 dimBlock(32, 8); BilinearZoom_CheckBoundary_Kernel << <dimGrid, dimBlock >> >(pu8Image1, pu8Image2, u32SrcWidth, u32SrcHeight, u32SrcStride, u32XStride, u32XPositionInit, u32DstWidth, u32DstHeight, u32DstStride, u32YStride, u32YPositionInit); checkCudaErrors(hipDeviceSynchronize()); } __global__ void kerBilinear_downgray(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32XStride, u32 u32XPositionInit, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride, u32 u32YStride, u32 u32YPositionInit, u8 *pu8GrayImg,u32 u32GrayWidth,u32 u32GrayHeight,u32 u32GrayStride) { u32 u32YSrc, u32RowIndex, u32LineIndex, u32WY2, u32WY1; u32 u32XPosition, u32XSrc, u32WX2, u32WX1, u32Vn0, u32Vn1; u8 *pu8SrcLine1; u8 *pu8SrcLine2; u8 *pu8Dst; u32 indexX,indexY; u32 sum[4],i,j; indexX = (blockIdx.x) * blockDim.x + threadIdx.x; indexY = (blockIdx.y) * blockDim.y + threadIdx.y; sum[0] = 0; sum[1] = 0; sum[2] = 0; sum[3] = 0; for(i=0;i<4;i++) for(j=0;j<4;j++) { u32LineIndex = indexX*4+j; u32RowIndex = indexY*4+i; u32 u32YPosition = u32YPositionInit + (u32RowIndex)*u32YStride; u32YSrc = u32YPosition >> 16; // u32WY2 = (u32YPosition << 16) >> 29; u32WY1 = 8 - u32WY2; //_dotprsu216 u32WY2 *= 1024; u32WY1 *= 1024; pu8SrcLine1 = pu8Image1 + u32YSrc * u32SrcStride; pu8SrcLine2 = u32WY2 == 0 ? 
pu8SrcLine1 : pu8SrcLine1 + u32SrcStride; //VC pu8Dst = pu8Image2 + u32RowIndex * u32DstStride + u32LineIndex;//u32DstWidth; u32XPosition = u32XPositionInit + (u32LineIndex)*u32XStride; //16 u32XSrc = u32XPosition >> 16; //163(0-8) u32WX2 = (u32XPosition << 16) >> 29; u32WX1 = 8 - u32WX2; //--_dotpu4 u32Vn0 = (pu8SrcLine1[u32XSrc] * u32WX1 + pu8SrcLine1[u32XSrc + 1] * u32WX2); u32Vn1 = (pu8SrcLine2[u32XSrc] * u32WX1 + pu8SrcLine2[u32XSrc + 1] * u32WX2); //--_dotprsu2 *pu8Dst = (u8)((u32Vn0 * u32WY1 + u32Vn1 * u32WY2 + 0x8000) >> 16); //sum[i] = sum[i] + (u32)(*pu8Dst); sum[i] += *pu8Dst; } pu8GrayImg[indexY*u32GrayStride+indexX] = (((sum[0]+2)>>2)+((sum[1]+2)>>2)+((sum[2]+2)>>2)+((sum[3]+2)>>2)+2)>>2; } void BilinearZoom_c_DownSample4x4GRAY_cuda(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride, u8 *pu8GrayImg) { u32 u32XStride, u32YStride; u32 u32YPositionInit = 0; u32 u32XPositionInit = 0; u32XStride = ((u32SrcWidth - 1) << 16) / (u32DstWidth - 1);//pow(2,16)*Src_width/Dst_width u32YStride = ((u32SrcHeight - 1) << 16) / (u32DstHeight - 1);//pow(2,16)*Src_height/Dst_height dim3 dimGrid(u32DstWidth/ 64, u32DstHeight/64); dim3 dimBlock(16, 16); u32 u32GrayWidth = u32DstWidth/4; u32 u32GrayHeight = u32DstHeight/4; u32 u32GrayStride = u32GrayWidth; kerBilinear_downgray << <dimGrid, dimBlock >> >(pu8Image1, pu8Image2, u32SrcWidth, u32SrcHeight, u32SrcStride, u32XStride, u32XPositionInit, u32DstWidth, u32DstHeight, u32DstStride, u32YStride, u32YPositionInit, pu8GrayImg,u32GrayWidth,u32GrayHeight,u32GrayStride); checkCudaErrors(hipDeviceSynchronize()); } __global__ void kerYUV2RGB(u8 *pu8SrcY, u8 *pu8SrcU, u8 *pu8SrcV, u8 *pu8Dst, l32 l32Width, l32 l32Height, l32 l32YStride, l32 l32UVStride, l32 l32RGBStride) { int xx, yy; gpu_type gY[4]; gpu_type gU, gV, gR, gG, gB; u8 *pDst[4]; int i; xx = (blockIdx.x * blockDim.x + threadIdx.x) * 2; yy = (blockIdx.y * blockDim.y + threadIdx.y) * 2; pDst[0] = pu8Dst + (l32Height - 1 - yy) * l32RGBStride + xx * 3; pDst[1] = pu8Dst + (l32Height - 1 - yy) * l32RGBStride + xx * 3 + 3; pDst[2] = pu8Dst + (l32Height - 1 - yy - 1) * l32RGBStride + xx * 3; pDst[3] = pu8Dst + (l32Height - 1 - yy - 1) * l32RGBStride + xx * 3 + 3; gY[0] = pu8SrcY[yy * l32YStride + xx] - 16; gY[1] = pu8SrcY[yy * l32YStride + xx + 1] - 16; gY[2] = pu8SrcY[(yy + 1) * l32YStride + xx] - 16; gY[3] = pu8SrcY[(yy + 1) * l32YStride + xx + 1] - 16; gU = pu8SrcU[yy / 2 * l32UVStride + xx / 2] - 128; gV = pu8SrcV[yy / 2 * l32UVStride + xx / 2] - 128; for (i = 0; i < 4; i++) { gR = (298 * gY[i] + 516 * gU + 128) / (gpu_type)256; gG = (298 * gY[i] - 100 * gU - 208 * gV + 128) / (gpu_type)256; gB = (298 * gY[i] + 409 * gV + 128) / (gpu_type)256; #ifdef __GPU_FLOAT__ gR = fmax(0, gR); gR = fmin(255, gR); gG = fmax(0, gG); gG = fmin(255, gG); gB = fmax(0, gB); gB = fmin(255, gB); #else gR = max(0, gR); gR = min(255, gR); gG = max(0, gG); gG = min(255, gG); gB = max(0, gB); gB = min(255, gB); #endif pDst[i][0] = gR; pDst[i][1] = gG; pDst[i][2] = gB; } } void YUV2RGB24Roi_cuda(stImage *ptImage, u8 *pu8RGB24Dst, l32 l32DstStride) { l32 l32Width, l32Height; u8 *pu8Y = (u8 *)ptImage->pu8Y; u8 *pu8U = (u8 *)ptImage->pu8U; u8 *pu8V = (u8 *)ptImage->pu8V; l32Width = ptImage->l32Width; l32Height = ptImage->l32Height; hipLaunchKernelGGL(( kerYUV2RGB), dim3(dim3(l32Width / (2 * 32), l32Height / (2 * 8))), dim3(dim3(32, 8)), 0, 0, pu8Y, pu8U, pu8V, pu8RGB24Dst, l32Width, l32Height, l32Width, l32Width / 2, l32DstStride); 
checkCudaErrors(hipDeviceSynchronize()); } __global__ void kerDia0(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32Stride) { #define MAX_OFFSET 3 u8 u8Src0[POINT_PER_THREAD + 2 * MAX_OFFSET], u8Src1[POINT_PER_THREAD + 2 * MAX_OFFSET]; __shared__ u8 u8Sh[(16 + 1) * 2 * MAX_OFFSET * 8]; //warning: the number "16" should be equal to blockDim.x and "8" should be equal to blockDim.y u8 *ptr0, *ptr1, *pSh; u8 umin, umax; int i, j; int offset, pointPerThread; pointPerThread = POINT_PER_THREAD; pu8Src = pu8Src + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; pu8Dst = pu8Dst + (blockDim.y * blockIdx.y + threadIdx.y) * l32Width + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; ptr0 = u8Src0 + MAX_OFFSET; ptr1 = u8Src1 + MAX_OFFSET; for (i = -1 * MAX_OFFSET; i < pointPerThread + MAX_OFFSET; i++) { ptr0[i] = pu8Src[i]; } //dialate 1 offset = 1; for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } ptr1[i] = umax; } //dialate 2 offset = 3; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr1[i]; pSh[i + offset] = ptr1[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr1[i - offset] = pSh[i - offset]; ptr1[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umin = 255; for (j = offset; j > 0; j--) { if (umin > ptr1[i + j]) { umin = ptr1[i + j]; } if (umin > ptr1[i - j]) { umin = ptr1[i - j]; } } if (umin > ptr1[i]) { umin = ptr1[i]; } ptr0[i] = umin; pu8Dst[i] = umin; } //dialate 3 offset = 2; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr0[i]; pSh[i + offset] = ptr0[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr0[i - offset] = pSh[i - offset]; ptr0[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } pu8Src[i] = umax; pu8Dst[i] = umax; } #undef MAX_OFFSET } __global__ void kerDia1(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32Stride) { #define MAX_OFFSET 4 u8 u8Src0[POINT_PER_THREAD + 2 * MAX_OFFSET], u8Src1[POINT_PER_THREAD + 2 * MAX_OFFSET]; __shared__ u8 u8Sh[(16 + 1) * 2 * MAX_OFFSET * 8]; //warning: the number "16" should be equal to blockDim.x and "8" should be equal to blockDim.y u8 *ptr0, *ptr1, *pSh; u8 umin, umax; int i, j; int offset, pointPerThread; pointPerThread = POINT_PER_THREAD; pu8Src = pu8Src + (blockDim.y * blockIdx.y + threadIdx.y) * 
l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; pu8Dst = pu8Dst + (blockDim.y * blockIdx.y + threadIdx.y) * l32Width + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; ptr0 = u8Src0 + MAX_OFFSET; ptr1 = u8Src1 + MAX_OFFSET; for (i = -1 * MAX_OFFSET; i < pointPerThread + MAX_OFFSET; i++) { ptr0[i] = pu8Src[i]; } //dialate 1 offset = 2; for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } ptr1[i] = umax; } //dialate 2 offset = 4; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr1[i]; pSh[i + offset] = ptr1[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr1[i - offset] = pSh[i - offset]; ptr1[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umin = 255; for (j = offset; j > 0; j--) { if (umin > ptr1[i + j]) { umin = ptr1[i + j]; } if (umin > ptr1[i - j]) { umin = ptr1[i - j]; } } if (umin > ptr1[i]) { umin = ptr1[i]; } ptr0[i] = umin; pu8Dst[i] = umin; } //dialate 3 offset = 2; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr0[i]; pSh[i + offset] = ptr0[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr0[i - offset] = pSh[i - offset]; ptr0[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } pu8Src[i] = umax; pu8Dst[i] = umax; } #undef MAX_OFFSET } void roughPart4(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32Height, l32 l32Stride) { hipLaunchKernelGGL(( kerDia0), dim3(dim3(l32Width / (POINT_PER_THREAD * 16), l32Height / (8 * 2))), dim3(dim3(16, 8)), 0, 0, pu8Src + 6, pu8Dst, l32Width, l32Stride); //the dimension should not be changed! 
hipLaunchKernelGGL(( kerDia1), dim3(dim3(l32Width / (POINT_PER_THREAD * 16), l32Height / (8 * 2))), dim3(dim3(16, 8)), 0, 0, pu8Src + 6 + (l32Width + 12) * l32Height / 2, pu8Dst + l32Width * l32Height / 2, l32Width, l32Stride); checkCudaErrors(hipDeviceSynchronize()); } __global__ void kerLPFilter(u8 *pu8Src, u8 *pu8Dst, l32 l32Max, l32 l32Width, l32 l32Stride) { #define FILTER_LENGTH 13 gpu_type qu[FILTER_LENGTH]; gpu_type lpf[]={3, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 5, 3}; gpu_type gSum; int head; int i, j; int pointPerThread; pointPerThread = POINT_PER_THREAD; pu8Src = pu8Src + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; pu8Dst = pu8Dst + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; for (i = -6; i <= 6; i++) { qu[i + 6] = *(pu8Src + i) * 250 / l32Max; } head = 0; for (i = 0; i < pointPerThread; i++) { gSum = 0; for (j = 0; j < FILTER_LENGTH; j++) { gSum += qu[(j + head) % FILTER_LENGTH] * lpf[j]; } pu8Dst[i] = gSum / 64; qu[(j + head) % FILTER_LENGTH] = pu8Src[i + (FILTER_LENGTH - 1) / 2 + 1] * 250 / l32Max; head++; } #undef FILTER_LENGTH } void roughPart6(u8 *pu8Src, u8 *pu8Dst, l32 l32Max, l32 l32Width, l32 l32Height, l32 l32Stride) { hipLaunchKernelGGL(( kerLPFilter), dim3(dim3(l32Width / (16 * POINT_PER_THREAD), l32Height / 4)), dim3(dim3(16, 4)), 0, 0, pu8Src + 6, pu8Dst + 6, l32Max, l32Width, l32Stride); checkCudaErrors(hipDeviceSynchronize()); } __global__ void kerGetFeature_opt(u8 *pu8RGB24, l32 l32Width, l32 l32Height, l32 l32Stride, u8 *pu8FeatureImage) { l32 xx = blockIdx.x * blockDim.x + threadIdx.x; l32 yy = blockIdx.y * blockDim.y + threadIdx.y; l32 xx_, xx0, xx1, yy_, yy0, yy1, bxx1; u8 *pSrc_, *pSrc0, *pSrc1; gpu_type f__, f_0, f_1; gpu_type f0_, f00, f01; gpu_type f1_, f10, f11; gpu_type b__, b_0, b_1; gpu_type b0_, b00, b01; gpu_type tmp_, tmp0, tmp1; gpu_type res1, res2; gpu_type fsum; int i, j; yy0 = yy * 4; yy_ = max(0, yy0 - 1); yy1 = min(l32Height - 1, yy0 + 1); pSrc_ = yy_ * l32Stride + pu8RGB24; pSrc0 = yy0 * l32Stride + pu8RGB24; pSrc1 = yy1 * l32Stride + pu8RGB24; xx0 = xx * 4; xx_ = max(0, xx0 - 1); xx1 = min(l32Width - 1, xx0 + 1); bxx1 = xx1; f__ = (gpu_type)pSrc_[xx_]; f_0 = (gpu_type)pSrc_[xx0]; f_1 = (gpu_type)pSrc_[xx1]; f0_ = (gpu_type)pSrc0[xx_]; f00 = (gpu_type)pSrc0[xx0]; f01 = (gpu_type)pSrc0[xx1]; f1_ = (gpu_type)pSrc1[xx_]; f10 = (gpu_type)pSrc1[xx0]; f11 = (gpu_type)pSrc1[xx1]; fsum = 0; for (j = 0; j < 4; j++) { b__ = f0_; b_0 = f00; b_1 = f01; b0_ = f1_; b00 = f10; b01 = f11; for (i = 0; i < 4; i++) { #ifdef __GPU_FLOAT__ tmp_ = fabs(f__ - f_1); tmp0 = fabs(f0_ - f01); tmp1 = fabs(f1_ - f11); #else tmp_ = abs(f__ - f_1); tmp0 = abs(f0_ - f01); tmp1 = abs(f1_ - f11); #endif res1 = ((tmp_ + tmp0 + 1) / 2 + (tmp0 + tmp1 + 1) / 2 + 1) / 2; #ifdef __GPU_FLOAT__ tmp_ = fabs(f__ - f1_); tmp0 = fabs(f_0 - f10); tmp1 = fabs(f_1 - f11); #else tmp_ = abs(f__ - f1_); tmp0 = abs(f_0 - f10); tmp1 = abs(f_1 - f11); #endif res2 = ((tmp_ + tmp0 + 1) / 2 + (tmp0 + tmp1 + 1) / 2 + 1) / 2; if (res1 < res2) { res1 = 0.0; } fsum += res1; xx1 = min(l32Width - 1, xx1 + 1); f__ = f_0; f_0 = f_1; f_1 = (gpu_type)pSrc_[xx1]; f0_ = f00; f00 = f01; f01 = (gpu_type)pSrc0[xx1]; f1_ = f10; f10 = f11; f11 = (gpu_type)pSrc1[xx1]; } f__ = b__; f_0 = b_0; f_1 = b_1; f0_ = b0_; f00 = b00; f01 = b01; pSrc_ = pSrc0; pSrc0 = pSrc1; xx1 = bxx1; yy1 = min(l32Height - 1, yy1 + 1); pSrc1 = yy1 * l32Stride + pu8RGB24; f1_ = (gpu_type)pSrc1[xx_]; f10 = 
(gpu_type)pSrc1[xx0]; f11 = (gpu_type)pSrc1[xx1]; } fsum /= (gpu_type)8; #ifdef __GPU_FLOAT__ fsum = fmin(fsum, (gpu_type)255.0); #else fsum = min(fsum, (gpu_type)255); #endif l32Stride /= 4; (pu8FeatureImage + yy * l32Stride)[xx] = (u8)fsum; } void GetFeatureYUVImage_SobelDownsample4x4_cuda(u8 *pu8RGB24, l32 l32Width, l32 l32Height, l32 l32Stride, u8 *pu8FeatureImage, u8 *pu8Temp) { hipLaunchKernelGGL(( kerGetFeature_opt), dim3(dim3(l32Width / (4 * 32), l32Height / (4 * 8))), dim3(dim3(32, 8)), 0, 0, pu8RGB24, l32Width, l32Height, l32Stride, pu8FeatureImage); checkCudaErrors(hipDeviceSynchronize()); } void DilateLine(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32DilateEleWidth) { l32 l32x, l32i1, l32i2, l32i, l32vmin, l32vmax; l32 l32EleWidth; l32 l32IsErode; if(l32DilateEleWidth > 0) { l32EleWidth = l32DilateEleWidth; l32IsErode = 0; } else { l32EleWidth = -l32DilateEleWidth; l32IsErode = 1; } for(l32x = 0; l32x < l32Width; l32x++) { l32i1 = l32x - l32EleWidth; l32i2 = l32x + l32EleWidth; if(l32i1 < 0) { l32i1 = 0; } if(l32i2 > l32Width - 1) { l32i2 = l32Width - 1; } l32vmin = 255; l32vmax = 0; if(l32IsErode) { for(l32i = l32i1; l32i <= l32i2; l32i++) { if(l32vmin > pu8Src[l32i]) { l32vmin = pu8Src[l32i]; } } pu8Dst[l32x] = l32vmin; } else { for(l32i = l32i1; l32i <= l32i2; l32i++) { if(l32vmax < pu8Src[l32i]) { l32vmax = pu8Src[l32i]; } } pu8Dst[l32x] = l32vmax; } } } l32 VehLprLocMaxProcess_cuda(stLocLcMax * ptLocLcMax, stLocInput *ptLocInput, stLocOutput *ptLocOutput) { l32 l32X, l32Y, l32i0, l32i1, l32i; l32 l32Xs, l32Xe, l32Ys, l32Ye; u8 *pu8Dst, *pu8Src, *pu8Temp, *pu8TempH, *pu8TempV; s16 *ps16SumTmp; u8 *pu8SrcTmp; l32 l32Width, l32Height; l32 l32PlateWidth, l32PlateHeight; l32 l32Temp0, l32Temp1, l32Temp2; l32 l32OffSet; u32 u32Temp0, u32Temp1; u32 u32Temp2, u32Temp3; u32 u32Temp4, u32Temp5; u32 u32Temp6, u32Temp7; l32 l32Val, l32Max; l32 l32Xs0,l32Xe0,l32Ys0,l32Ye0; l32 l32RectID; LCMAX_POS aLCMaxPos[20]; l32 l32PeakS, l32LcX, l32LcY, l32MeetMax; TVehLprLcRect tRoiRect; l32 l32DesMapExtStride; l32 l32DensMapStride; u32 u32V0, u32V1, u32V2, u32V3; u32 u32V4, u32V5, u32V6, u32V7; u32 u32V8, u32V9, u32V10, u32V11, u32V12; u8 *RESTRICT pu8TmpDst1 = NULL; u16 *RESTRICT pu16TmpDstV = NULL; u16 *RESTRICT pu16TmpDstH = NULL; u16 *RESTRICT pu16TmpSrc = NULL; u8 *RESTRICT pu8TmpSrc1 = NULL; u8 *RESTRICT pu8TmpSrc2 = NULL; u8 *RESTRICT pu8TmpSrc3 = NULL; s64 Res0, Res1, lMask; u64 u64X0, u64X1, u64X2, u64X3, u64X4, u64X5, u64X6, u64X7; u64 u64Data0, u64Data1, u64Data2, u64Data3, u64Data4, u64Data5, u64Data6, u64Data7; u64 u64Mask0, u64Mask1, u64Mask2, u64Mask3; u64 u64Tmp0, u64Tmp1, u64Tmp2, u64Tmp3; u64 u64Const9; l32 l32LoopA; myTimer timer; memset(ptLocLcMax->pu8EdgeDensMap, 0, ptLocInput->l32Width * ptLocInput->l32Height / 16); for(l32RectID = 0; l32RectID < MAX_LOCNUM; l32RectID++) { ptLocOutput->ptRect[l32RectID].l32Valid = 0; } ptLocOutput->l32RectCount = MAX_LOCNUM; GetFeatureYUVImage_SobelDownsample4x4_cuda (ptLocInput->pu8SrcImg, ptLocInput->l32Width, ptLocInput->l32Height, ptLocInput->l32Stride, ptLocLcMax->pu8EdgeDensMapOrg, NULL); l32Width = (ptLocInput->l32Width/4); l32Height = (ptLocInput->l32Height/4); /* tRoiRect = ptLocLcMax->tRoiRect; tRoiRect.l32top = tRoiRect.l32top/4; tRoiRect.l32bottom = tRoiRect.l32bottom/4; tRoiRect.l32left = tRoiRect.l32left/4; tRoiRect.l32right = tRoiRect.l32right/4; startTimer(); pu8Dst = ptLocLcMax->pu8EdgeDensMapOrg; for(l32Y = 0; l32Y<l32Height; l32Y++) { if((l32Y < tRoiRect.l32top)||(l32Y > tRoiRect.l32bottom)) { memset(pu8Dst, 0, l32Width); 
} else { memset(pu8Dst, 0, tRoiRect.l32left); memset(pu8Dst + tRoiRect.l32right, 0, l32Width - tRoiRect.l32right); } pu8Dst += l32Width; } dispTimer("part1"); */ //Really stupid. for (l32Y = 0; l32Y < l32Height; l32Y++) { ptLocLcMax->pu8EdgeDensMapOrg[l32Y * l32Width + l32Width - 1] = 0; } pu8Src = ptLocLcMax->pu8EdgeDensMapOrg; pu8Dst = ptLocLcMax->pu8EdgeDensMap; l32DensMapStride = ((l32Width/2) + 7)& 0xFFFFFFF8; l32DesMapExtStride = l32DensMapStride + 2 * EDGE_WIDTH; //8(l32Width/2)88DilateSIMD for(l32Y = 0; l32Y<l32Height; l32Y++) { for (l32X = 0; l32X < EDGE_WIDTH; l32X++) { pu8Dst[l32X] = 0; } for(; l32X < EDGE_WIDTH + l32Width / 2; l32X++) { pu8Dst[l32X] = (pu8Src[2 * (l32X - EDGE_WIDTH)] + pu8Src[2 * (l32X - EDGE_WIDTH) + 1] + 1) >> 1; } for(; l32X < l32DesMapExtStride; l32X++) { pu8Dst[l32X] = 0; } pu8Src += l32Width; pu8Dst += l32DesMapExtStride; } l32Width = l32Width / 2; pu8Dst = (u8*)ptLocLcMax->pu8Temp + l32Width; // ps16SumTmp = (s16 *)ptLocLcMax->pu8Temp; ps16SumTmp = (s16 *)(((u32)ps16SumTmp + 32) & (~7)); memset(ps16SumTmp, 0, sizeof(s16) * l32Width); pu8Dst = (u8 *)(ps16SumTmp + l32Width + 16); pu8TempH = pu8Dst + l32Width; pu8TempV = pu8TempH + l32Width; // //for(l32i = l32Y - 8;l32i <= l32Y + 8; l32i++) #define HEIGHT_RADIUS 8 //8 for(l32X = 0; l32X < l32Width; l32X++) { l32Temp0 = 0; for(l32i = -(HEIGHT_RADIUS + 1); l32i <= (HEIGHT_RADIUS - 1); l32i++) { l32i0 = l32i; if(l32i0 < 0) { l32i0 = 0; } if(l32i0 > l32Height - 1) { l32i0 = l32Height - 1; } l32Temp0 += ptLocLcMax->pu8EdgeDensMap[EDGE_WIDTH + l32X + l32i0 * l32DesMapExtStride]; } ps16SumTmp[l32X] = l32Temp0; } for(l32Y = 0; l32Y < l32Height; l32Y ++) { u8 *pu8SrcRowH, *pu8SrcRowT; //l32Width 5 //l32Width 5 //for(l32i = l32X - 4; l32i <= l32X + 4; l32i++) //9 #define WIDTH_RADIUS 4 //4 pu8SrcRowH = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8SrcRowT = pu8SrcRowH + WIDTH_RADIUS; //Sum: -5,-4,-3,-2,-1,0,1,2,3 l32Temp2 = pu8SrcRowH[0] * 6 + pu8SrcRowH[1] + pu8SrcRowH[2] + pu8SrcRowH[3]; for(l32X = 0; l32X < WIDTH_RADIUS + 1; l32X++) { l32Temp2 -= *pu8SrcRowH; l32Temp2 += *pu8SrcRowT; pu8TempH[l32X] = l32Temp2 / (WIDTH_RADIUS * 2 + 1); pu8SrcRowT++; } for(; l32X < l32Width - (WIDTH_RADIUS + 1); l32X++) { l32Temp2 -= *pu8SrcRowH; l32Temp2 += *pu8SrcRowT; pu8TempH[l32X] = l32Temp2 / (WIDTH_RADIUS * 2 + 1); pu8SrcRowH++; pu8SrcRowT++; } for(; l32X < l32Width; l32X++) { l32Temp2 -= *pu8SrcRowH; l32Temp2 += *pu8SrcRowT; pu8TempH[l32X] = l32Temp2 / (WIDTH_RADIUS * 2 + 1); pu8SrcRowH++; } l32i0 = l32Y - (HEIGHT_RADIUS + 1); if(l32i0 < 0) { l32i0 = 0; } l32i1 = l32Y + HEIGHT_RADIUS; if(l32i1 > l32Height - 1) { l32i1 = l32Height - 1; } pu8SrcRowH = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32i0 * l32DesMapExtStride; pu8SrcRowT = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32i1 * l32DesMapExtStride; for(l32X = 0; l32X < l32Width; l32X++) { // ps16SumTmp[l32X] -= pu8SrcRowH[l32X]; ps16SumTmp[l32X] += pu8SrcRowT[l32X]; l32Temp1 = ps16SumTmp[l32X]; pu8TempV[l32X] = l32Temp1 / (HEIGHT_RADIUS * 2 + 1); } DilateLine(pu8TempV, pu8Dst, l32Width, 1); for(l32X = EDGE_WIDTH; l32X < l32Width + EDGE_WIDTH; l32X++) { if(pu8Dst[l32X - EDGE_WIDTH] < pu8TempH[l32X - EDGE_WIDTH]) { ptLocLcMax->pu8EdgeDensMap2[l32X + l32Y * l32DesMapExtStride] = ptLocLcMax->pu8EdgeDensMap[l32X + l32Y * l32DesMapExtStride]; } else { ptLocLcMax->pu8EdgeDensMap2[l32X + l32Y * l32DesMapExtStride] = 0; } } //edge point is handled here for (l32X = 0; l32X < EDGE_WIDTH; l32X++) { ptLocLcMax->pu8EdgeDensMap2[l32X + l32Y * l32DesMapExtStride] 
= ptLocLcMax->pu8EdgeDensMap2[EDGE_WIDTH + l32Y * l32DesMapExtStride]; ptLocLcMax->pu8EdgeDensMap2[EDGE_WIDTH + l32Width + l32X + l32Y * l32DesMapExtStride] = ptLocLcMax->pu8EdgeDensMap2[EDGE_WIDTH + l32Width - 1 + l32Y * l32DesMapExtStride]; } } roughPart4(ptLocLcMax->pu8EdgeDensMap2, ptLocLcMax->pu8EdgeDensMapMoph, l32Width, l32Height, l32DesMapExtStride); l32Max = 0; for(l32Y = 0; l32Y < l32Height; l32Y++) { pu8Src = ptLocLcMax->pu8EdgeDensMap2 + l32Y * l32DesMapExtStride; for(l32X = EDGE_WIDTH; l32X < l32Width + EDGE_WIDTH; l32X++) { if(pu8Src[l32X] < 10) { pu8Src[l32X] = 0; } if(l32Max < pu8Src[l32X]) { l32Max = pu8Src[l32X]; } } for (l32X = 0; l32X < EDGE_WIDTH; l32X++) { pu8Src[l32X] = pu8Src[EDGE_WIDTH]; } for (l32X = l32Width + EDGE_WIDTH; l32X < l32Width + EDGE_WIDTH * 2; l32X++) { pu8Src[l32X] = pu8Src[l32Width + EDGE_WIDTH - 1]; } } if(l32Max < 0) { l32Max = 250; } roughPart6(ptLocLcMax->pu8EdgeDensMap2, ptLocLcMax->pu8EdgeDensMap, l32Max, l32Width, l32Height, l32DesMapExtStride); //1 2 1 for(l32Y = 0; l32Y < l32Height; l32Y++) { // pu8Src = ptLocLcMax->pu8EdgeDensMap + l32DesMapExtStride + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8Src = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8Dst = ptLocLcMax->pu8EdgeDensMap2 + EDGE_WIDTH + l32Y * l32DesMapExtStride; if(l32Y == 0) { for(l32X = 0; l32X < l32Width; l32X++) { pu8Dst[l32X] = (pu8Src[l32X] + pu8Src[l32X + l32DesMapExtStride]) >> 1; } } else if(l32Y == l32Height - 1) { for(l32X=0; l32X<l32Width; l32X++) { pu8Dst[l32X] = (pu8Src[l32X] + pu8Src[l32X - l32DesMapExtStride]) >> 1; } } else { for(l32X = 0; l32X < l32Width; l32X++) { pu8Dst[l32X] = (pu8Src[l32X - l32DesMapExtStride] + 2*pu8Src[l32X] + pu8Src[l32X + l32DesMapExtStride]) >> 2; } } for(l32X = 0; l32X < l32Width; l32X++) { if(pu8Dst[l32X] < 10) { pu8Dst[l32X] = 0; } } } for(l32Y = 0; l32Y < l32Height; l32Y++) { memset(ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride, 0, l32Width); } memset(aLCMaxPos, 0, sizeof(aLCMaxPos)); for(l32Y = 1; l32Y < l32Height; l32Y++) { pu8Src = ptLocLcMax->pu8EdgeDensMap2 + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8Dst = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride; l32PeakS = -1; l32Val = 0; for(l32X = 1; l32X < l32Width; l32X++) { if((pu8Src[l32X] < pu8Src[l32X-1]) || (l32X == (l32Width - 1))) { if(l32Val == 1) { if(l32PeakS < 0) l32PeakS = l32X - 1; l32MeetMax = 0; for(l32i = l32PeakS; l32i < l32X; l32i++) { if(pu8Src[l32i] >= pu8Src[l32i-l32DesMapExtStride] && pu8Src[l32i] >= pu8Src[l32i-l32DesMapExtStride-1] && pu8Src[l32i] >= pu8Src[l32i-l32DesMapExtStride+1] && pu8Src[l32i] >= pu8Src[l32i+l32DesMapExtStride] && pu8Src[l32i] >= pu8Src[l32i+l32DesMapExtStride-1] && pu8Src[l32i] >= pu8Src[l32i+l32DesMapExtStride+1]) { //pu8Dst[l32i] = 255; l32MeetMax = 1; } } //, if(l32MeetMax) { l32LcX = (l32PeakS + l32X)/2; l32LcY = l32Y; if(pu8Src[l32LcX] > 0) { pu8Dst[l32LcX] = 255; // for(l32i1 = 0;l32i1 < sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0]); l32i1++) { if(aLCMaxPos[l32i1].l32Val < pu8Src[l32LcX]) { // break; } } if(l32i1 < sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0])) { // for(l32i = sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0])-1;l32i > l32i1; l32i--) { aLCMaxPos[l32i] = aLCMaxPos[l32i-1]; } aLCMaxPos[l32i1].l32PosX = l32X; aLCMaxPos[l32i1].l32PosY = l32LcY; aLCMaxPos[l32i1].l32Val = pu8Src[l32LcX]; } } } } l32PeakS = -1; l32Val = -1; } else if(pu8Src[l32X] == pu8Src[l32X - 1]) { if(l32PeakS < 0) { l32PeakS = l32X - 1; } } else if(pu8Src[l32X] > pu8Src[l32X - 1]) { l32Val = 1; // l32PeakS 
= -1; } } } l32RectID = 0; memset(ptLocLcMax->pu8EdgeDensMap2, 0, l32Width * l32Height); for(l32i = 0;l32i < sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0]); l32i++) { l32X = aLCMaxPos[l32i].l32PosX; l32Y = aLCMaxPos[l32i].l32PosY; if(aLCMaxPos[l32i].l32Val == 0) { break; } l32Val = 10; // pu8Src = ptLocLcMax->pu8EdgeDensMapMoph + l32X; for(l32Ys = aLCMaxPos[l32i].l32PosY; l32Ys > aLCMaxPos[l32i].l32PosY - 3; l32Ys--) { if(pu8Src[l32Ys * l32DensMapStride] < l32Val) { break; } } for(l32Ye = aLCMaxPos[l32i].l32PosY; l32Ye < aLCMaxPos[l32i].l32PosY + 3; l32Ye++) { if(pu8Src[l32Ye*l32DensMapStride] < l32Val) { break; } } /* if(ptLocLcMax->l32UpsideDown == 0) { if(l32Y < l32Height/2) { l32Ys ++; } } else { // yuv if(l32Y > l32Height/2) { l32Ys ++; } } */ //only the following will be exectued if(l32Y > l32Height/2) { l32Ys ++; } pu8Temp = (u8*)ptLocLcMax->pu8Temp; for(l32Xs = 0; l32Xs < l32Width; l32Xs++) { l32Val = 0; for(l32Y = l32Ys; l32Y < l32Ye; l32Y++) { if(l32Val < ptLocLcMax->pu8EdgeDensMapMoph[l32Y*l32DensMapStride + l32Xs]) { l32Val = ptLocLcMax->pu8EdgeDensMapMoph[l32Y*l32DensMapStride + l32Xs]; } } l32Val = l32Val * 3; if(l32Val > 255) l32Val = 255; pu8Temp[l32Xs] = l32Val; } l32Val = pu8Temp[l32X] * 40/100; pu8Src = pu8Temp; for(l32Xs = aLCMaxPos[l32i].l32PosX; l32Xs > aLCMaxPos[l32i].l32PosX - 11; l32Xs--) { if(pu8Src[l32Xs] <= l32Val) { break; } } for(l32Xe = aLCMaxPos[l32i].l32PosX; l32Xe < aLCMaxPos[l32i].l32PosX + 11; l32Xe++) { if(pu8Src[l32Xe] <= l32Val) { break; } } if((l32Ye <= l32Ys) || (l32Xe <= l32Xs)) { continue; } if(l32Xs < 0) l32Xs = 0; if(l32Ys < 0) l32Ys = 0; if(l32Xe > l32Width-1) l32Xe = l32Width-1; if(l32Ye > l32Height-1) l32Ye = l32Height-1; l32Xs0 = l32Xs; l32Xe0 = l32Xe; l32Ys0 = l32Ys; l32Ye0 = l32Ye; { l32Xs *= 8; l32Xe *= 8; l32Ys *= 4; l32Ye *= 4; } l32PlateHeight = (l32Ye-l32Ys); l32PlateWidth = (l32Xe-l32Xs); if(aLCMaxPos[l32i].l32Val > 0) { if(l32RectID < ptLocOutput->l32RectCount) { l32 l32W,l32H; l32W = l32Xe - l32Xs; l32H = l32Ye - l32Ys; ptLocOutput->ptRect[l32RectID].l32left = l32Xs; ptLocOutput->ptRect[l32RectID].l32right = l32Xe; ptLocOutput->ptRect[l32RectID].l32top = l32Ys; ptLocOutput->ptRect[l32RectID].l32bottom = l32Ye; ptLocOutput->ptRect[l32RectID].l32Valid = 1; l32RectID ++; } } } // ptLocOutput->l32RectCount = l32RectID; return 0; } void *thread_func_cuda(void *arg) { struct _cuda_thread_arg *th_arg = (struct _cuda_thread_arg *)arg; TVehLprLcRect tRect[MAX_LOCNUM] = {0}; myTimer timer2; ptLocInput0.l32Width = th_arg->l32Width; ptLocInput0.l32Height = th_arg->l32Height; ptLocInput0.l32Stride = th_arg->l32Stride; ptImage0.l32Width = ptLocInput0.l32Width; ptImage0.l32Height = ptLocInput0.l32Height; while (1) { sem_wait(&sem_full); if (pu8CudaInputSrc == NULL) { printf("cuda exit successfully\n"); return NULL; } timer2.start(); //copy image source //memcpy(pu8CudaImgBuf, pu8CudaInputSrc, (ptLocInput0.l32Width * ptLocInput0.l32Height * 3) >> 1); checkCudaErrors(hipMemcpy(pu8CudaImgBuf, pu8CudaInputSrc, (ptLocInput0.l32Width * ptLocInput0.l32Height * 3) >> 1, hipMemcpyHostToDevice)); sem_post(&sem_empty); timer2.disp("copy in"); timer2.start(); //rough locate ptLocInput0.pu8SrcImg = pu8CudaImgBuf; ptLocOutput0.ptRect = tRect; ptLocOutput0.l32RectCount = MAX_LOCNUM; VehLprLocMaxProcess_cuda(&ptLocLcMax0, &ptLocInput0, &ptLocOutput0); //yuv2rgb ptImage0.pu8Y = pu8CudaImgBuf; ptImage0.pu8U = pu8CudaImgBuf + ptLocInput0.l32Width * ptLocInput0.l32Height; ptImage0.pu8V = pu8CudaImgBuf + (ptLocInput0.l32Width * ptLocInput0.l32Height * 5) / 4; 
YUV2RGB24Roi_cuda(&ptImage0, pu8CudaRGBBuf, ptLocInput0.l32Width * 3); //resize BilinearZoom_c_DownSample4x4GRAY_cuda(pu8CudaImgBuf, pu8CudaZoomBuf, ptLocInput0.l32Width, ptLocInput0.l32Height, ptLocInput0.l32Stride, SCALEWIDTH, SCALEHEIGHT, SCALEWIDTH, pu8CudaGrayBuf); TGuass2Model *ptGuassModel = (TGuass2Model*)pvCudaBgModel; BilinearZoom_c_cuda(pu8CudaGrayBuf,pu8CudaFrameCurBuf, SCALEWIDTH/4,SCALEHEIGHT/4,SCALEWIDTH/4, ptGuassModel->l32ImageWidth,ptGuassModel->l32ImageHeight,ptGuassModel->l32ImageWidth); //bgm int state = BGMGuassMogProcess_cuda(ptGuassModel,pu8CudaFrameCurBuf,pu8CudaFGFrameBuf); if(state!=EStatus_Success){ printf("BGMGuass ERROR!\n"); exit(EXIT_FAILURE); } timer2.disp("cuda process"); sem_wait(&sem_finish); timer2.start(); //rough output memcpy(&atCudaRoughRectsOut, ptLocOutput0.ptRect, sizeof(TVehLprLcRect) * MAX_LOCNUM); l32CudaRectCountOut = ptLocOutput0.l32RectCount; //yuv2rgb output memcpy(pu8CudaRGBOut, pu8CudaRGBBuf, ptLocInput0.l32Width * ptLocInput0.l32Height * 3); //resize output memcpy(pu8CudaZoomOut, pu8CudaZoomBuf, SCALEWIDTH * SCALEHEIGHT); memcpy(pu8CudaGrayOut, pu8CudaGrayBuf, SCALEWIDTH * SCALEHEIGHT); memcpy(pu8CudaFrameCurOut, pu8CudaFrameCurBuf, ptGuassModel->l32ImageWidth * ptGuassModel->l32ImageHeight); //bgm output memcpy(pu8CudaFGFrameOut, pu8CudaFGFrameBuf, ptGuassModel->l32ImageWidth * ptGuassModel->l32ImageHeight); timer2.disp("copy out"); sem_post(&sem_ready); } }
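The worker loop in thread_func_cuda above exchanges frames with the rest of the application through the four POSIX semaphores this program declares (sem_empty/sem_full guard the input pointer, sem_finish/sem_ready guard the output buffers). The producer side is not part of this file; the sketch below shows one plausible driver. It assumes u8 is a typedef for unsigned char and invents get_next_frame() as the frame source; neither is taken from the original project.

// Hypothetical driver for thread_func_cuda (sketch only; not from the original project).
#include <semaphore.h>
#include <stddef.h>

typedef unsigned char u8;                 /* assumed typedef for this sketch */
extern sem_t sem_empty, sem_full, sem_ready, sem_finish;
extern u8 *pu8CudaInputSrc;
extern u8 *get_next_frame(void);          /* invented helper: returns NULL when the stream ends */

void feed_frames(void)
{
    u8 *frame;
    while ((frame = get_next_frame()) != NULL) {
        sem_wait(&sem_empty);             /* worker has finished copying the previous frame */
        pu8CudaInputSrc = frame;          /* publish the new YUV420 input */
        sem_post(&sem_full);              /* worker starts its device copy and processing */

        sem_wait(&sem_ready);             /* atCudaRoughRectsOut, pu8CudaRGBOut, ... are now valid */
        /* ... consume the *_Out buffers here ... */
        sem_post(&sem_finish);            /* allow the worker to overwrite the outputs */
    }
    /* shutdown (publishing a NULL input) is handled by uninit_cuda() in this file */
}

Because the worker posts sem_empty as soon as the input copy finishes, a real driver could split the feeding and collecting halves into separate threads to overlap frame upload with GPU processing; the single loop above is only the simplest correct ordering.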
261726d068a2ebe50cebb611ece826597c6166d9.cu
#include "func_cuda.h" #include <pthread.h> #include <cuda_runtime.h> #include <sys/time.h> #include <stdlib.h> #include "alg_bgmodel.h" #define POINT_PER_THREAD 15 #define EDGE_WIDTH 6 //shared with other files TVehLprLcRect atCudaRoughRectsOut[MAX_LOCNUM]; l32 l32CudaRectCountOut; u8 *pu8CudaInputSrc; u8 *pu8CudaRGBOut; u8 *pu8CudaZoomOut; u8 *pu8CudaGrayOut; u8 *pu8CudaFrameCurOut; u8 *pu8CudaFGFrameOut; sem_t sem_empty, sem_full, sem_ready, sem_finish; //only used in this file u8 *pu8CudaImgBuf; u8 *pu8CudaRGBBuf; u8 *pu8CudaZoomBuf; u8 *pu8CudaGrayBuf; u8 *pu8CudaFrameCurBuf; u8 *pu8CudaFGFrameBuf; void *pvCudaBgModel; struct timeval tstart[20]; int t_cnt = -1; //thread param pthread_t tid; struct _cuda_thread_arg { l32 l32Width; l32 l32Height; l32 l32Stride; }thread_arg; //inner function param typedef struct _LCMAX_POS_ { l32 l32PosX; l32 l32PosY; l32 l32Val; }LCMAX_POS; struct stLocLcMax { u8 *pu8EdgeDensMap; u8 *pu8EdgeDensMapOrg; u8 *pu8EdgeDensMap2; u8 *pu8EdgeDensMapMoph; u8 *pu8Temp; u8 *tRoiRect; } ptLocLcMax0; struct stImage { u8 *pu8Y; u8 *pu8U; u8 *pu8V; l32 l32Width; l32 l32Height; } ptImage0; struct stLocInput { u8 *pu8SrcImg; l32 l32Width; l32 l32Height; l32 l32Stride; } ptLocInput0; struct stLocOutput { TVehLprLcRect *ptRect; l32 l32RectCount; } ptLocOutput0; //cuda thread function void *thread_func_cuda(void *arg); void init_cuda(l32 l32OrgWidth, l32 l32OrgHeight, l32 l32FrameWidth, l32 l32FrameHeight) { l32 bufLength = (l32OrgWidth * l32OrgHeight * 3) >> 1; //init semaphore sem_init(&sem_empty, 0, 1); sem_init(&sem_full, 0, 0); sem_init(&sem_ready, 0, 0); sem_init(&sem_finish, 0, 1); //allocate cuda buffer checkCudaErrors(cudaMalloc(&pu8CudaImgBuf, sizeof(u8) * bufLength)); checkCudaErrors(cudaMallocManaged(&pu8CudaRGBBuf, sizeof(u8) * bufLength * 2)); checkCudaErrors(cudaMallocManaged(&pu8CudaZoomBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); checkCudaErrors(cudaMallocManaged(&pu8CudaGrayBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); checkCudaErrors(cudaMallocManaged(&pu8CudaFrameCurBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); checkCudaErrors(cudaMallocManaged(&pu8CudaFGFrameBuf, sizeof(u8) * SCALEWIDTH * SCALEHEIGHT)); //allocate cuda output buffer pu8CudaRGBOut = (u8 *)malloc(sizeof(u8) * bufLength * 2); pu8CudaZoomOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); pu8CudaGrayOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); pu8CudaFrameCurOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); pu8CudaFGFrameOut = (u8 *)malloc(sizeof(u8) * SCALEWIDTH * SCALEHEIGHT); if (pu8CudaRGBOut == NULL || pu8CudaZoomOut == NULL || pu8CudaGrayOut == NULL || pu8CudaFrameCurOut == NULL || pu8CudaFGFrameOut == NULL) { printf("cuda init malloc error\n"); exit(1); } //allocate rough locate buffer checkCudaErrors(cudaMallocManaged(&ptLocLcMax0.pu8EdgeDensMapOrg, sizeof(u8) * bufLength)); checkCudaErrors(cudaMallocManaged(&ptLocLcMax0.pu8EdgeDensMap, sizeof(u8) * bufLength)); checkCudaErrors(cudaMallocManaged(&ptLocLcMax0.pu8EdgeDensMap2, sizeof(u8) * bufLength)); checkCudaErrors(cudaMallocManaged(&ptLocLcMax0.pu8EdgeDensMapMoph, sizeof(u8) * bufLength)); checkCudaErrors(cudaMallocManaged(&ptLocLcMax0.pu8Temp, sizeof(u8) * bufLength)); //open bgm if (BGMGuassMogOpen((void **)&pvCudaBgModel, l32FrameWidth, l32FrameHeight) != EStatus_Success) { printf("BGM Open error"); exit(1); } //create cuda thread thread_arg.l32Width = l32OrgWidth; thread_arg.l32Stride = l32OrgWidth; thread_arg.l32Height = l32OrgHeight; int error = pthread_create(&tid, NULL, thread_func_cuda, 
&thread_arg); if (error != 0) { printf("create thread error: %d\n", error); } } void uninit_cuda() { //let cuda thread exit sem_wait(&sem_empty); pu8CudaInputSrc = NULL; sem_post(&sem_full); pthread_join(tid, NULL); //free cuda buffer checkCudaErrors(cudaFree(pu8CudaImgBuf)); checkCudaErrors(cudaFree(pu8CudaRGBBuf)); checkCudaErrors(cudaFree(pu8CudaZoomBuf)); checkCudaErrors(cudaFree(pu8CudaGrayBuf)); checkCudaErrors(cudaFree(pu8CudaFrameCurBuf)); checkCudaErrors(cudaFree(pu8CudaFGFrameBuf)); //free cuda output buffer free(pu8CudaRGBOut); free(pu8CudaZoomOut); free(pu8CudaGrayOut); free(pu8CudaFrameCurOut); free(pu8CudaFGFrameOut); //free buffer for rough locate checkCudaErrors(cudaFree(ptLocLcMax0.pu8EdgeDensMapOrg)); checkCudaErrors(cudaFree(ptLocLcMax0.pu8EdgeDensMap)); checkCudaErrors(cudaFree(ptLocLcMax0.pu8EdgeDensMap2)); checkCudaErrors(cudaFree(ptLocLcMax0.pu8EdgeDensMapMoph)); checkCudaErrors(cudaFree(ptLocLcMax0.pu8Temp)); //destroy semaphore sem_destroy(&sem_empty); sem_destroy(&sem_full); sem_destroy(&sem_ready); sem_destroy(&sem_finish); //close bgm BGMGuassMogClose(pvCudaBgModel); } void check(unsigned int result, char const *const func, const char *const file, int const line) { if (result) { fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n", file, line, result, func); // Make sure we call CUDA Device Reset before exiting exit(result); } } void startTimer() { if (t_cnt < 20){ gettimeofday(&tstart[++t_cnt], NULL); } } void dispTimer(char f_name[]) { struct timeval tend; gettimeofday(&tend, NULL); long time_used = (tend.tv_sec-tstart[t_cnt].tv_sec)*1000000+tend.tv_usec-tstart[t_cnt].tv_usec; printf("%s time: %.3lf ms\n", f_name, time_used / 1000.0); t_cnt--; } class myTimer { private: struct timeval tstart, tend; long time_used; public: void Timer() { start(); } void start() { gettimeofday(&tstart, NULL); } void disp(char *str) { gettimeofday(&tend, NULL); time_used = (tend.tv_sec - tstart.tv_sec) * 1000000 + tend.tv_usec - tstart.tv_usec; printf("%s time: %.3lf ms\n", str, time_used / 1000.0); } void disp() { gettimeofday(&tend, NULL); time_used = (tend.tv_sec - tstart.tv_sec) * 1000000 + tend.tv_usec - tstart.tv_usec; printf("total time: %.3lf ms\n", time_used / 1000.0); } }; __global__ void kerUpdatePixelBackgroundGMM2( TGuass2Value* ptGuassValueSrc, u8 *pu8PixelValueSrc, u8 *pu8GuassUsedSrc, u8 *pu8ForgroundPixelSrc, f32 fLearnRate, f32 fForegroundThreshod, f32 fDeviationThreshold, f32 fTb, f32 fCT, l32 bShadowDetection, f32 fShadowThreshold, l32 l32width, l32 l32height, l32 l32stride ) { u32 l32ImgIndexX, l32ImgIndexY,l32ImgIndex; l32 bMatched = 0; l32 l32Index, l32Local; l32 bBackGroundPixel = 0; TGuass2Value* ptGuassComponent; f32 fTotalWeight = 0; f32 fPrune = -fLearnRate * fCT; u8 u8PixelValue, *pu8ForgroundPixel, *pu8GuassUsed; TGuass2Value* ptGuassValue; l32 bShadowRe; l32ImgIndexX = blockIdx.x*blockDim.x + threadIdx.x; l32ImgIndexY = blockIdx.y*blockDim.y + threadIdx.y; if ((l32ImgIndexX < l32width) && (l32ImgIndexY < l32height)){ l32ImgIndex = l32ImgIndexY*l32stride + l32ImgIndexX; ptGuassValue = ptGuassValueSrc + BGFG_MOG2_MAX_GUASSNUM*l32ImgIndex; ptGuassComponent = ptGuassValue; u8PixelValue = pu8PixelValueSrc[l32ImgIndex]; pu8ForgroundPixel = pu8ForgroundPixelSrc + l32ImgIndex; pu8GuassUsed = pu8GuassUsedSrc + l32ImgIndex; for (l32Index = 0; l32Index < (*pu8GuassUsed); l32Index++, ptGuassComponent++) { f32 fWeight = ptGuassComponent->fWeight; f32 fAlpha; fWeight = (1 - fLearnRate) * fWeight + fPrune; if (!bMatched) { f32 fDif; fDif = 
(ptGuassComponent->fmean - u8PixelValue) * (ptGuassComponent->fmean - u8PixelValue); if (fTotalWeight < fForegroundThreshod && fDif < fTb * ptGuassComponent->fVar) { bBackGroundPixel = 1; } if (fDif < fDeviationThreshold * ptGuassComponent->fVar) { bMatched = 1; fWeight = fWeight + fLearnRate; fAlpha = fLearnRate / fWeight; ptGuassComponent->fmean = (1 - fAlpha) * ptGuassComponent->fmean + fAlpha * u8PixelValue; ptGuassComponent->fVar = MIN(BGFG_MOG2_VAR_MAX, MAX(BGFG_MOG2_VAR_MIN, (1 - fAlpha) * ptGuassComponent->fVar + fAlpha * fDif)); for (l32Local = l32Index; l32Local > 0; l32Local--) { if (fWeight < (ptGuassValue[l32Local - 1].fWeight)) { break; } else { TGuass2Value tTempGuass = ptGuassValue[l32Local]; ptGuassValue[l32Local] = ptGuassValue[l32Local - 1]; ptGuassValue[l32Local - 1] = tTempGuass; ptGuassComponent--; } } } } if (fWeight < -fPrune) { fWeight = 0.0; (*pu8GuassUsed)--; } ptGuassComponent->fWeight = fWeight; fTotalWeight += fWeight; } ptGuassComponent = ptGuassValue; for (l32Index = 0; l32Index < (*pu8GuassUsed); l32Index++, ptGuassComponent++) { ptGuassComponent->fWeight /= fTotalWeight; } if (!bMatched) { if (BGFG_MOG2_MAX_GUASSNUM == (*pu8GuassUsed)) { ptGuassComponent = ptGuassValue + BGFG_MOG2_MAX_GUASSNUM - 1; } else { ptGuassComponent = ptGuassValue + (*pu8GuassUsed); (*pu8GuassUsed)++; } if ((*pu8GuassUsed) == 1) { ptGuassComponent->fWeight = 1; } else { ptGuassComponent->fWeight = fLearnRate; for (l32Index = 0; l32Index < (*pu8GuassUsed) - 1; l32Index++) { ptGuassValue[l32Index].fWeight = ptGuassValue[l32Index].fWeight * (1 - fLearnRate); } } ptGuassComponent->fmean = u8PixelValue; ptGuassComponent->fVar = BGFG_MOG2_VAR_INIT; for (l32Local = (*pu8GuassUsed) - 1; l32Local > 0; l32Local--) { if (fLearnRate < (ptGuassValue[l32Local - 1].fWeight)) { break; } else { TGuass2Value tTempGuass = ptGuassValue[l32Local]; ptGuassValue[l32Local] = ptGuassValue[l32Local - 1]; ptGuassValue[l32Local - 1] = tTempGuass; ptGuassComponent--; } } } if (bBackGroundPixel) { *pu8ForgroundPixel = 0; } else { if (bShadowDetection) { #if 0 f32 fWeight = 0; f32 fnumerator, fdenominator; l32 l32IModes, l32IndexD; TGuass2Value tGass2value; f32 fRate; f32 fDist2Rate; f32 fDistD; // check all the components marked as background: for (l32IModes = 0; l32IModes < *pu8GuassUsed; l32IModes++) { tGass2value = ptGuassValue[l32IModes]; fnumerator = 0.0f; fdenominator = 0.0f; fnumerator += u8PixelValue * tGass2value.fmean; fdenominator += tGass2value.fmean * tGass2value.fmean; // no division by zero allowed if (fdenominator == 0) { bShadowRe = 0; } fRate = fnumerator / fdenominator; // if tau < a < 1 then also check the color distortion if ((fRate <= 1) && (fRate >= fShadowThreshold)) { fDist2Rate = 0.0f; fDistD = fRate * tGass2value.fmean - u8PixelValue; fDist2Rate += (fDistD * fDistD); if (fDist2Rate < fTb * tGass2value.fVar * fRate * fRate) { bShadowRe = 1; } } fWeight += tGass2value.fWeight; if (fWeight > fForegroundThreshod) { bShadowRe = 0; } } bShadowRe = 0; #endif } else { bShadowRe = 0; } if (bShadowRe == 1) { *pu8ForgroundPixel = 0; } else { *pu8ForgroundPixel = 255; } } } } l32 BGMGuassMogProcess_cuda(void *pvModelParam, u8 *pu8CurrentImage, u8 *puForegroundImage) { l32 l32Row, l32Col, l32Temp, l32I; l32 bBackground, l32Fg, l32Diff; TGuass2Model* ptGuassModel; TGuass2Value* ptGuassValue; u8* pu8GuassUsed; u8* pu8CurrentPixel; u8* pu8ForgroundPixel; l32 bShadowRe; if (pvModelParam == NULL || pu8CurrentImage == NULL) { printf("input param pu8CurrentImage == NULL\n"); return 
ERR_VEH_DETECT_PROCESS_INPUT_PARAM; } ptGuassModel = (TGuass2Model*)pvModelParam; ptGuassValue = &ptGuassModel->ptGuassValue[0]; pu8GuassUsed = &ptGuassModel->puUsedGuassNum[0]; pu8CurrentPixel = &pu8CurrentImage[0]; pu8ForgroundPixel = &ptGuassModel->pu8ForgroundImage[0]; ptGuassModel->l32Count++; l32Temp = MIN(2 * ptGuassModel->l32Count, BGFG_MOG2_HISTORY); ptGuassModel->fLearnRate = 1 / (f32)l32Temp; //估计光照 BGGuass2EstimateLum(ptGuassModel, pu8CurrentPixel); //根据估计结果检测前景 for (l32I = 0; l32I < ptGuassModel->l32Size; l32I++) { l32Fg = pu8CurrentPixel[l32I]; if (l32Fg == 0) { //为零的前景认为是超出roi区域的 puForegroundImage[l32I] = 0; } else { bBackground = ptGuassModel->pu8BackgroundImage[l32I] + ptGuassModel->l32BgLumAdj; l32Diff = (l32Fg > bBackground) ? (l32Fg - bBackground) : (bBackground - l32Fg); if (l32Diff > 20) { puForegroundImage[l32I] = 255; if ((l32Fg < bBackground) && (l32Fg >(bBackground * 50 / 100))) { puForegroundImage[l32I] = 128; } } else { puForegroundImage[l32I] = 0; } } } dim3 dim3Block_rect((ptGuassModel->l32ImageWidth+31) / 32, (ptGuassModel->l32ImageHeight+7) / 8, 1); dim3 dim3threads_rect(32, 8, 1); kerUpdatePixelBackgroundGMM2<< <dim3Block_rect, dim3threads_rect >> >( ptGuassModel->ptGuassValue, pu8CurrentImage, ptGuassModel->puUsedGuassNum, ptGuassModel->pu8ForgroundImage, ptGuassModel->fLearnRate, ptGuassModel->fForegroundThreshod, ptGuassModel->fDeviationThreshold, ptGuassModel->fTb, ptGuassModel->fCT, ptGuassModel->bShadowDetection, ptGuassModel->fShadowThreshold, ptGuassModel->l32ImageWidth, ptGuassModel->l32ImageHeight, ptGuassModel->l32ImageWidth ); checkCudaErrors(cudaDeviceSynchronize()); //memcpy(puForegroundImage , ptGuassModel->pu8ForgroundImage, ptGuassModel->l32ImageWidth * ptGuassModel->l32ImageHeight); BGMGuassMogGetBGImage(ptGuassModel); return EStatus_Success; } __global__ void BilinearZoom_CheckBoundary_Kernel(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32XStride, u32 u32XPositionInit, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride, u32 u32YStride, u32 u32YPositionInit) { u32 u32YSrc, u32RowIndex, u32LineIndex, u32WY2, u32WY1; u32 u32XPosition, u32XSrc, u32WX2, u32WX1, u32Vn0, u32Vn1; u8 *pu8SrcLine1; u8 *pu8SrcLine2; u8 *pu8Dst; u32LineIndex = (blockIdx.x) * blockDim.x + threadIdx.x; u32RowIndex = (blockIdx.y) * blockDim.y + threadIdx.y; if(u32LineIndex<u32DstWidth && u32RowIndex<u32DstHeight){ u32 u32YPosition = u32YPositionInit + (u32RowIndex)*u32YStride; u32YSrc = u32YPosition >> 16; u32WY2 = (u32YPosition << 16) >> 29; u32WY1 = 8 - u32WY2; u32WY2 *= 1024; u32WY1 *= 1024; pu8SrcLine1 = pu8Image1 + u32YSrc * u32SrcStride; pu8SrcLine2 = u32WY2 == 0 ? 
pu8SrcLine1 : pu8SrcLine1 + u32SrcStride; pu8Dst = pu8Image2 + u32RowIndex * u32DstStride + u32LineIndex; u32XPosition = u32XPositionInit + (u32LineIndex)*u32XStride; u32XSrc = u32XPosition >> 16; u32WX2 = (u32XPosition << 16) >> 29; u32WX1 = 8 - u32WX2; u32Vn0 = (pu8SrcLine1[u32XSrc] * u32WX1 + pu8SrcLine1[u32XSrc + 1] * u32WX2); u32Vn1 = (pu8SrcLine2[u32XSrc] * u32WX1 + pu8SrcLine2[u32XSrc + 1] * u32WX2); *pu8Dst = (u8)((u32Vn0 * u32WY1 + u32Vn1 * u32WY2 + 0x8000) >> 16); } } void BilinearZoom_c_cuda(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride) { u32 u32XStride, u32YStride; u32 u32YPositionInit = 0; u32 u32XPositionInit = 0; u32XStride = ((u32SrcWidth - 1) << 16) / (u32DstWidth - 1);//pow(2,16)*Src_width/Dst_width u32YStride = ((u32SrcHeight - 1) << 16) / (u32DstHeight - 1);//pow(2,16)*Src_height/Dst_height if (0 == ((u32XStride << 16) >> 27)) { u32XStride = ((u32SrcWidth - 2) << 16) / (u32DstWidth - 1);//pow(2,16)*Src_width/Dst_width u32XPositionInit = 5 << 15; } if ((u32SrcHeight != u32DstHeight) && (0 == ((u32YStride << 16) >> 27))) { u32YStride = ((u32SrcHeight - 2) << 16) / (u32DstHeight - 1);//pow(2,16)*Src_height/Dst_height u32YPositionInit = 1 << 15; } dim3 dimGrid((u32DstWidth+31) / 32, (u32DstHeight+7)/8); dim3 dimBlock(32, 8); BilinearZoom_CheckBoundary_Kernel << <dimGrid, dimBlock >> >(pu8Image1, pu8Image2, u32SrcWidth, u32SrcHeight, u32SrcStride, u32XStride, u32XPositionInit, u32DstWidth, u32DstHeight, u32DstStride, u32YStride, u32YPositionInit); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void kerBilinear_downgray(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32XStride, u32 u32XPositionInit, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride, u32 u32YStride, u32 u32YPositionInit, u8 *pu8GrayImg,u32 u32GrayWidth,u32 u32GrayHeight,u32 u32GrayStride) { u32 u32YSrc, u32RowIndex, u32LineIndex, u32WY2, u32WY1; u32 u32XPosition, u32XSrc, u32WX2, u32WX1, u32Vn0, u32Vn1; u8 *pu8SrcLine1; u8 *pu8SrcLine2; u8 *pu8Dst; u32 indexX,indexY; u32 sum[4],i,j; indexX = (blockIdx.x) * blockDim.x + threadIdx.x; indexY = (blockIdx.y) * blockDim.y + threadIdx.y; sum[0] = 0; sum[1] = 0; sum[2] = 0; sum[3] = 0; for(i=0;i<4;i++) for(j=0;j<4;j++) { u32LineIndex = indexX*4+j; u32RowIndex = indexY*4+i; u32 u32YPosition = u32YPositionInit + (u32RowIndex)*u32YStride; u32YSrc = u32YPosition >> 16; //垂直方向权值 u32WY2 = (u32YPosition << 16) >> 29; u32WY1 = 8 - u32WY2; //放大权值以利用_dotprsu2指令右移16位特点 u32WY2 *= 1024; u32WY1 *= 1024; pu8SrcLine1 = pu8Image1 + u32YSrc * u32SrcStride; pu8SrcLine2 = u32WY2 == 0 ? 
pu8SrcLine1 : pu8SrcLine1 + u32SrcStride; //在最后一行的时候,读的数据是不参与运算的,但VC不允许越界读,因此这里加判断防止读越界 pu8Dst = pu8Image2 + u32RowIndex * u32DstStride + u32LineIndex;//u32DstWidth; u32XPosition = u32XPositionInit + (u32LineIndex)*u32XStride; //定点数的高16位就是整数部分 u32XSrc = u32XPosition >> 16; //定点数的低16位就是小数部分,我们使用其高3位作为权值(范围0-8) u32WX2 = (u32XPosition << 16) >> 29; u32WX1 = 8 - u32WX2; //水平线性滤波--下面的操作对应于_dotpu4指令 u32Vn0 = (pu8SrcLine1[u32XSrc] * u32WX1 + pu8SrcLine1[u32XSrc + 1] * u32WX2); u32Vn1 = (pu8SrcLine2[u32XSrc] * u32WX1 + pu8SrcLine2[u32XSrc + 1] * u32WX2); //垂直线性滤波--下面的操作对应于_dotprsu2指令 *pu8Dst = (u8)((u32Vn0 * u32WY1 + u32Vn1 * u32WY2 + 0x8000) >> 16); //sum[i] = sum[i] + (u32)(*pu8Dst); sum[i] += *pu8Dst; } pu8GrayImg[indexY*u32GrayStride+indexX] = (((sum[0]+2)>>2)+((sum[1]+2)>>2)+((sum[2]+2)>>2)+((sum[3]+2)>>2)+2)>>2; } void BilinearZoom_c_DownSample4x4GRAY_cuda(u8 *pu8Image1, u8 *pu8Image2, u32 u32SrcWidth, u32 u32SrcHeight, u32 u32SrcStride, u32 u32DstWidth, u32 u32DstHeight, u32 u32DstStride, u8 *pu8GrayImg) { u32 u32XStride, u32YStride; u32 u32YPositionInit = 0; u32 u32XPositionInit = 0; u32XStride = ((u32SrcWidth - 1) << 16) / (u32DstWidth - 1);//pow(2,16)*Src_width/Dst_width u32YStride = ((u32SrcHeight - 1) << 16) / (u32DstHeight - 1);//pow(2,16)*Src_height/Dst_height dim3 dimGrid(u32DstWidth/ 64, u32DstHeight/64); dim3 dimBlock(16, 16); u32 u32GrayWidth = u32DstWidth/4; u32 u32GrayHeight = u32DstHeight/4; u32 u32GrayStride = u32GrayWidth; kerBilinear_downgray << <dimGrid, dimBlock >> >(pu8Image1, pu8Image2, u32SrcWidth, u32SrcHeight, u32SrcStride, u32XStride, u32XPositionInit, u32DstWidth, u32DstHeight, u32DstStride, u32YStride, u32YPositionInit, pu8GrayImg,u32GrayWidth,u32GrayHeight,u32GrayStride); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void kerYUV2RGB(u8 *pu8SrcY, u8 *pu8SrcU, u8 *pu8SrcV, u8 *pu8Dst, l32 l32Width, l32 l32Height, l32 l32YStride, l32 l32UVStride, l32 l32RGBStride) { int xx, yy; gpu_type gY[4]; gpu_type gU, gV, gR, gG, gB; u8 *pDst[4]; int i; xx = (blockIdx.x * blockDim.x + threadIdx.x) * 2; yy = (blockIdx.y * blockDim.y + threadIdx.y) * 2; pDst[0] = pu8Dst + (l32Height - 1 - yy) * l32RGBStride + xx * 3; pDst[1] = pu8Dst + (l32Height - 1 - yy) * l32RGBStride + xx * 3 + 3; pDst[2] = pu8Dst + (l32Height - 1 - yy - 1) * l32RGBStride + xx * 3; pDst[3] = pu8Dst + (l32Height - 1 - yy - 1) * l32RGBStride + xx * 3 + 3; gY[0] = pu8SrcY[yy * l32YStride + xx] - 16; gY[1] = pu8SrcY[yy * l32YStride + xx + 1] - 16; gY[2] = pu8SrcY[(yy + 1) * l32YStride + xx] - 16; gY[3] = pu8SrcY[(yy + 1) * l32YStride + xx + 1] - 16; gU = pu8SrcU[yy / 2 * l32UVStride + xx / 2] - 128; gV = pu8SrcV[yy / 2 * l32UVStride + xx / 2] - 128; for (i = 0; i < 4; i++) { gR = (298 * gY[i] + 516 * gU + 128) / (gpu_type)256; gG = (298 * gY[i] - 100 * gU - 208 * gV + 128) / (gpu_type)256; gB = (298 * gY[i] + 409 * gV + 128) / (gpu_type)256; #ifdef __GPU_FLOAT__ gR = fmax(0, gR); gR = fmin(255, gR); gG = fmax(0, gG); gG = fmin(255, gG); gB = fmax(0, gB); gB = fmin(255, gB); #else gR = max(0, gR); gR = min(255, gR); gG = max(0, gG); gG = min(255, gG); gB = max(0, gB); gB = min(255, gB); #endif pDst[i][0] = gR; pDst[i][1] = gG; pDst[i][2] = gB; } } void YUV2RGB24Roi_cuda(stImage *ptImage, u8 *pu8RGB24Dst, l32 l32DstStride) { l32 l32Width, l32Height; u8 *pu8Y = (u8 *)ptImage->pu8Y; u8 *pu8U = (u8 *)ptImage->pu8U; u8 *pu8V = (u8 *)ptImage->pu8V; l32Width = ptImage->l32Width; l32Height = ptImage->l32Height; kerYUV2RGB<<<dim3(l32Width / (2 * 32), l32Height / (2 * 8)), dim3(32, 8)>>>(pu8Y, pu8U, pu8V, 
pu8RGB24Dst, l32Width, l32Height, l32Width, l32Width / 2, l32DstStride); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void kerDia0(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32Stride) { #define MAX_OFFSET 3 u8 u8Src0[POINT_PER_THREAD + 2 * MAX_OFFSET], u8Src1[POINT_PER_THREAD + 2 * MAX_OFFSET]; __shared__ u8 u8Sh[(16 + 1) * 2 * MAX_OFFSET * 8]; //warning: the number "16" should be equal to blockDim.x and "8" should be equal to blockDim.y u8 *ptr0, *ptr1, *pSh; u8 umin, umax; int i, j; int offset, pointPerThread; pointPerThread = POINT_PER_THREAD; pu8Src = pu8Src + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; pu8Dst = pu8Dst + (blockDim.y * blockIdx.y + threadIdx.y) * l32Width + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; ptr0 = u8Src0 + MAX_OFFSET; ptr1 = u8Src1 + MAX_OFFSET; for (i = -1 * MAX_OFFSET; i < pointPerThread + MAX_OFFSET; i++) { ptr0[i] = pu8Src[i]; } //dialate 1 offset = 1; for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } ptr1[i] = umax; } //dialate 2 offset = 3; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr1[i]; pSh[i + offset] = ptr1[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr1[i - offset] = pSh[i - offset]; ptr1[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umin = 255; for (j = offset; j > 0; j--) { if (umin > ptr1[i + j]) { umin = ptr1[i + j]; } if (umin > ptr1[i - j]) { umin = ptr1[i - j]; } } if (umin > ptr1[i]) { umin = ptr1[i]; } ptr0[i] = umin; pu8Dst[i] = umin; } //dialate 3 offset = 2; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr0[i]; pSh[i + offset] = ptr0[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr0[i - offset] = pSh[i - offset]; ptr0[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } pu8Src[i] = umax; pu8Dst[i] = umax; } #undef MAX_OFFSET } __global__ void kerDia1(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32Stride) { #define MAX_OFFSET 4 u8 u8Src0[POINT_PER_THREAD + 2 * MAX_OFFSET], u8Src1[POINT_PER_THREAD + 2 * MAX_OFFSET]; __shared__ u8 u8Sh[(16 + 1) * 2 * MAX_OFFSET * 8]; //warning: the number "16" should be equal to blockDim.x and "8" should be equal to blockDim.y u8 *ptr0, *ptr1, *pSh; u8 umin, umax; int i, j; int offset, pointPerThread; pointPerThread = 
POINT_PER_THREAD; pu8Src = pu8Src + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; pu8Dst = pu8Dst + (blockDim.y * blockIdx.y + threadIdx.y) * l32Width + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; ptr0 = u8Src0 + MAX_OFFSET; ptr1 = u8Src1 + MAX_OFFSET; for (i = -1 * MAX_OFFSET; i < pointPerThread + MAX_OFFSET; i++) { ptr0[i] = pu8Src[i]; } //dialate 1 offset = 2; for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } ptr1[i] = umax; } //dialate 2 offset = 4; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr1[i]; pSh[i + offset] = ptr1[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr1[i - offset] = pSh[i - offset]; ptr1[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umin = 255; for (j = offset; j > 0; j--) { if (umin > ptr1[i + j]) { umin = ptr1[i + j]; } if (umin > ptr1[i - j]) { umin = ptr1[i - j]; } } if (umin > ptr1[i]) { umin = ptr1[i]; } ptr0[i] = umin; pu8Dst[i] = umin; } //dialate 3 offset = 2; pSh = offset * (1 + 2 * threadIdx.x + 2 * (1 + blockDim.x) * threadIdx.y) + u8Sh; for (i = 0; i < offset; i++) { pSh[i] = ptr0[i]; pSh[i + offset] = ptr0[pointPerThread - offset + i]; } //store edge data __syncthreads(); //revise the edge if (blockIdx.x == 0 && threadIdx.x == 0) { for (i = 1; i <= offset; i++) { pSh[0 - i] = pSh[0]; } } if (blockIdx.x == gridDim.x - 1 && threadIdx.x == blockDim.x - 1) { for (i = 2 * offset; i < 3 * offset; i++) { pSh[i] = pSh[offset * 2 - 1]; } } __syncthreads(); for (i = 0; i < offset; i++) { ptr0[i - offset] = pSh[i - offset]; ptr0[pointPerThread + i] = pSh[i + 2 * offset]; } //load edge data __syncthreads(); for (i = 0; i < pointPerThread; i++ ) { umax = 0; for (j = offset; j > 0; j--) { if (umax < ptr0[i + j]) { umax = ptr0[i + j]; } if (umax < ptr0[i - j]) { umax = ptr0[i - j]; } } if (umax < ptr0[i]) { umax = ptr0[i]; } pu8Src[i] = umax; pu8Dst[i] = umax; } #undef MAX_OFFSET } void roughPart4(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32Height, l32 l32Stride) { kerDia0<<<dim3(l32Width / (POINT_PER_THREAD * 16), l32Height / (8 * 2)), dim3(16, 8)>>>(pu8Src + 6, pu8Dst, l32Width, l32Stride); //the dimension should not be changed! 
kerDia1<<<dim3(l32Width / (POINT_PER_THREAD * 16), l32Height / (8 * 2)), dim3(16, 8)>>>(pu8Src + 6 + (l32Width + 12) * l32Height / 2, pu8Dst + l32Width * l32Height / 2, l32Width, l32Stride); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void kerLPFilter(u8 *pu8Src, u8 *pu8Dst, l32 l32Max, l32 l32Width, l32 l32Stride) { #define FILTER_LENGTH 13 gpu_type qu[FILTER_LENGTH]; gpu_type lpf[]={3, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 5, 3}; gpu_type gSum; int head; int i, j; int pointPerThread; pointPerThread = POINT_PER_THREAD; pu8Src = pu8Src + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; pu8Dst = pu8Dst + (blockDim.y * blockIdx.y + threadIdx.y) * l32Stride + (threadIdx.x + blockIdx.x * blockDim.x) * pointPerThread; for (i = -6; i <= 6; i++) { qu[i + 6] = *(pu8Src + i) * 250 / l32Max; } head = 0; for (i = 0; i < pointPerThread; i++) { gSum = 0; for (j = 0; j < FILTER_LENGTH; j++) { gSum += qu[(j + head) % FILTER_LENGTH] * lpf[j]; } pu8Dst[i] = gSum / 64; qu[(j + head) % FILTER_LENGTH] = pu8Src[i + (FILTER_LENGTH - 1) / 2 + 1] * 250 / l32Max; head++; } #undef FILTER_LENGTH } void roughPart6(u8 *pu8Src, u8 *pu8Dst, l32 l32Max, l32 l32Width, l32 l32Height, l32 l32Stride) { kerLPFilter<<<dim3(l32Width / (16 * POINT_PER_THREAD), l32Height / 4), dim3(16, 4)>>>(pu8Src + 6, pu8Dst + 6, l32Max, l32Width, l32Stride); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void kerGetFeature_opt(u8 *pu8RGB24, l32 l32Width, l32 l32Height, l32 l32Stride, u8 *pu8FeatureImage) { l32 xx = blockIdx.x * blockDim.x + threadIdx.x; l32 yy = blockIdx.y * blockDim.y + threadIdx.y; l32 xx_, xx0, xx1, yy_, yy0, yy1, bxx1; u8 *pSrc_, *pSrc0, *pSrc1; gpu_type f__, f_0, f_1; gpu_type f0_, f00, f01; gpu_type f1_, f10, f11; gpu_type b__, b_0, b_1; gpu_type b0_, b00, b01; gpu_type tmp_, tmp0, tmp1; gpu_type res1, res2; gpu_type fsum; int i, j; yy0 = yy * 4; yy_ = max(0, yy0 - 1); yy1 = min(l32Height - 1, yy0 + 1); pSrc_ = yy_ * l32Stride + pu8RGB24; pSrc0 = yy0 * l32Stride + pu8RGB24; pSrc1 = yy1 * l32Stride + pu8RGB24; xx0 = xx * 4; xx_ = max(0, xx0 - 1); xx1 = min(l32Width - 1, xx0 + 1); bxx1 = xx1; f__ = (gpu_type)pSrc_[xx_]; f_0 = (gpu_type)pSrc_[xx0]; f_1 = (gpu_type)pSrc_[xx1]; f0_ = (gpu_type)pSrc0[xx_]; f00 = (gpu_type)pSrc0[xx0]; f01 = (gpu_type)pSrc0[xx1]; f1_ = (gpu_type)pSrc1[xx_]; f10 = (gpu_type)pSrc1[xx0]; f11 = (gpu_type)pSrc1[xx1]; fsum = 0; for (j = 0; j < 4; j++) { b__ = f0_; b_0 = f00; b_1 = f01; b0_ = f1_; b00 = f10; b01 = f11; for (i = 0; i < 4; i++) { #ifdef __GPU_FLOAT__ tmp_ = fabs(f__ - f_1); tmp0 = fabs(f0_ - f01); tmp1 = fabs(f1_ - f11); #else tmp_ = abs(f__ - f_1); tmp0 = abs(f0_ - f01); tmp1 = abs(f1_ - f11); #endif res1 = ((tmp_ + tmp0 + 1) / 2 + (tmp0 + tmp1 + 1) / 2 + 1) / 2; #ifdef __GPU_FLOAT__ tmp_ = fabs(f__ - f1_); tmp0 = fabs(f_0 - f10); tmp1 = fabs(f_1 - f11); #else tmp_ = abs(f__ - f1_); tmp0 = abs(f_0 - f10); tmp1 = abs(f_1 - f11); #endif res2 = ((tmp_ + tmp0 + 1) / 2 + (tmp0 + tmp1 + 1) / 2 + 1) / 2; if (res1 < res2) { res1 = 0.0; } fsum += res1; xx1 = min(l32Width - 1, xx1 + 1); f__ = f_0; f_0 = f_1; f_1 = (gpu_type)pSrc_[xx1]; f0_ = f00; f00 = f01; f01 = (gpu_type)pSrc0[xx1]; f1_ = f10; f10 = f11; f11 = (gpu_type)pSrc1[xx1]; } f__ = b__; f_0 = b_0; f_1 = b_1; f0_ = b0_; f00 = b00; f01 = b01; pSrc_ = pSrc0; pSrc0 = pSrc1; xx1 = bxx1; yy1 = min(l32Height - 1, yy1 + 1); pSrc1 = yy1 * l32Stride + pu8RGB24; f1_ = (gpu_type)pSrc1[xx_]; f10 = (gpu_type)pSrc1[xx0]; f11 = (gpu_type)pSrc1[xx1]; } fsum /= (gpu_type)8; #ifdef 
__GPU_FLOAT__ fsum = fmin(fsum, (gpu_type)255.0); #else fsum = min(fsum, (gpu_type)255); #endif l32Stride /= 4; (pu8FeatureImage + yy * l32Stride)[xx] = (u8)fsum; } void GetFeatureYUVImage_SobelDownsample4x4_cuda(u8 *pu8RGB24, l32 l32Width, l32 l32Height, l32 l32Stride, u8 *pu8FeatureImage, u8 *pu8Temp) { kerGetFeature_opt<<<dim3(l32Width / (4 * 32), l32Height / (4 * 8)), dim3(32, 8)>>>(pu8RGB24, l32Width, l32Height, l32Stride, pu8FeatureImage); checkCudaErrors(cudaDeviceSynchronize()); } void DilateLine(u8 *pu8Src, u8 *pu8Dst, l32 l32Width, l32 l32DilateEleWidth) { l32 l32x, l32i1, l32i2, l32i, l32vmin, l32vmax; l32 l32EleWidth; l32 l32IsErode; if(l32DilateEleWidth > 0) { l32EleWidth = l32DilateEleWidth; l32IsErode = 0; } else { l32EleWidth = -l32DilateEleWidth; l32IsErode = 1; } for(l32x = 0; l32x < l32Width; l32x++) { l32i1 = l32x - l32EleWidth; l32i2 = l32x + l32EleWidth; if(l32i1 < 0) { l32i1 = 0; } if(l32i2 > l32Width - 1) { l32i2 = l32Width - 1; } l32vmin = 255; l32vmax = 0; if(l32IsErode) { for(l32i = l32i1; l32i <= l32i2; l32i++) { if(l32vmin > pu8Src[l32i]) { l32vmin = pu8Src[l32i]; } } pu8Dst[l32x] = l32vmin; } else { for(l32i = l32i1; l32i <= l32i2; l32i++) { if(l32vmax < pu8Src[l32i]) { l32vmax = pu8Src[l32i]; } } pu8Dst[l32x] = l32vmax; } } } l32 VehLprLocMaxProcess_cuda(stLocLcMax * ptLocLcMax, stLocInput *ptLocInput, stLocOutput *ptLocOutput) { l32 l32X, l32Y, l32i0, l32i1, l32i; l32 l32Xs, l32Xe, l32Ys, l32Ye; u8 *pu8Dst, *pu8Src, *pu8Temp, *pu8TempH, *pu8TempV; s16 *ps16SumTmp; u8 *pu8SrcTmp; l32 l32Width, l32Height; l32 l32PlateWidth, l32PlateHeight; l32 l32Temp0, l32Temp1, l32Temp2; l32 l32OffSet; u32 u32Temp0, u32Temp1; u32 u32Temp2, u32Temp3; u32 u32Temp4, u32Temp5; u32 u32Temp6, u32Temp7; l32 l32Val, l32Max; l32 l32Xs0,l32Xe0,l32Ys0,l32Ye0; l32 l32RectID; LCMAX_POS aLCMaxPos[20]; l32 l32PeakS, l32LcX, l32LcY, l32MeetMax; TVehLprLcRect tRoiRect; l32 l32DesMapExtStride; l32 l32DensMapStride; u32 u32V0, u32V1, u32V2, u32V3; u32 u32V4, u32V5, u32V6, u32V7; u32 u32V8, u32V9, u32V10, u32V11, u32V12; u8 *RESTRICT pu8TmpDst1 = NULL; u16 *RESTRICT pu16TmpDstV = NULL; u16 *RESTRICT pu16TmpDstH = NULL; u16 *RESTRICT pu16TmpSrc = NULL; u8 *RESTRICT pu8TmpSrc1 = NULL; u8 *RESTRICT pu8TmpSrc2 = NULL; u8 *RESTRICT pu8TmpSrc3 = NULL; s64 Res0, Res1, lMask; u64 u64X0, u64X1, u64X2, u64X3, u64X4, u64X5, u64X6, u64X7; u64 u64Data0, u64Data1, u64Data2, u64Data3, u64Data4, u64Data5, u64Data6, u64Data7; u64 u64Mask0, u64Mask1, u64Mask2, u64Mask3; u64 u64Tmp0, u64Tmp1, u64Tmp2, u64Tmp3; u64 u64Const9; l32 l32LoopA; myTimer timer; memset(ptLocLcMax->pu8EdgeDensMap, 0, ptLocInput->l32Width * ptLocInput->l32Height / 16); for(l32RectID = 0; l32RectID < MAX_LOCNUM; l32RectID++) { ptLocOutput->ptRect[l32RectID].l32Valid = 0; } ptLocOutput->l32RectCount = MAX_LOCNUM; GetFeatureYUVImage_SobelDownsample4x4_cuda (ptLocInput->pu8SrcImg, ptLocInput->l32Width, ptLocInput->l32Height, ptLocInput->l32Stride, ptLocLcMax->pu8EdgeDensMapOrg, NULL); l32Width = (ptLocInput->l32Width/4); l32Height = (ptLocInput->l32Height/4); /* tRoiRect = ptLocLcMax->tRoiRect; tRoiRect.l32top = tRoiRect.l32top/4; tRoiRect.l32bottom = tRoiRect.l32bottom/4; tRoiRect.l32left = tRoiRect.l32left/4; tRoiRect.l32right = tRoiRect.l32right/4; startTimer(); pu8Dst = ptLocLcMax->pu8EdgeDensMapOrg; for(l32Y = 0; l32Y<l32Height; l32Y++) { if((l32Y < tRoiRect.l32top)||(l32Y > tRoiRect.l32bottom)) { memset(pu8Dst, 0, l32Width); } else { memset(pu8Dst, 0, tRoiRect.l32left); memset(pu8Dst + tRoiRect.l32right, 0, l32Width - tRoiRect.l32right); 
} pu8Dst += l32Width; } dispTimer("part1"); */ //Really stupid. for (l32Y = 0; l32Y < l32Height; l32Y++) { ptLocLcMax->pu8EdgeDensMapOrg[l32Y * l32Width + l32Width - 1] = 0; } pu8Src = ptLocLcMax->pu8EdgeDensMapOrg; pu8Dst = ptLocLcMax->pu8EdgeDensMap; l32DensMapStride = ((l32Width/2) + 7)& 0xFFFFFFF8; l32DesMapExtStride = l32DensMapStride + 2 * EDGE_WIDTH; //多扩展8字节,是为了当(l32Width/2)已经8字节对齐时,边界扩展8字节,便于Dilate操作,利用SIMD优化 for(l32Y = 0; l32Y<l32Height; l32Y++) { for (l32X = 0; l32X < EDGE_WIDTH; l32X++) { pu8Dst[l32X] = 0; } for(; l32X < EDGE_WIDTH + l32Width / 2; l32X++) { pu8Dst[l32X] = (pu8Src[2 * (l32X - EDGE_WIDTH)] + pu8Src[2 * (l32X - EDGE_WIDTH) + 1] + 1) >> 1; } for(; l32X < l32DesMapExtStride; l32X++) { pu8Dst[l32X] = 0; } pu8Src += l32Width; pu8Dst += l32DesMapExtStride; } l32Width = l32Width / 2; pu8Dst = (u8*)ptLocLcMax->pu8Temp + l32Width; //临时复用此缓存 ps16SumTmp = (s16 *)ptLocLcMax->pu8Temp; ps16SumTmp = (s16 *)(((u32)ps16SumTmp + 32) & (~7)); memset(ps16SumTmp, 0, sizeof(s16) * l32Width); pu8Dst = (u8 *)(ps16SumTmp + l32Width + 16); pu8TempH = pu8Dst + l32Width; pu8TempV = pu8TempH + l32Width; //首先进行预处理的加法 //for(l32i = l32Y - 8;l32i <= l32Y + 8; l32i++) #define HEIGHT_RADIUS 8 //定义垂直半径为8 for(l32X = 0; l32X < l32Width; l32X++) { l32Temp0 = 0; for(l32i = -(HEIGHT_RADIUS + 1); l32i <= (HEIGHT_RADIUS - 1); l32i++) { l32i0 = l32i; if(l32i0 < 0) { l32i0 = 0; } if(l32i0 > l32Height - 1) { l32i0 = l32Height - 1; } l32Temp0 += ptLocLcMax->pu8EdgeDensMap[EDGE_WIDTH + l32X + l32i0 * l32DesMapExtStride]; } ps16SumTmp[l32X] = l32Temp0; } for(l32Y = 0; l32Y < l32Height; l32Y ++) { u8 *pu8SrcRowH, *pu8SrcRowT; //此种方式l32Width 不能小于 5 否则程序报错 //但是一般情况下l32Width 不会小于 5,故从性能优化出发,此特种情况不予考虑 //for(l32i = l32X - 4; l32i <= l32X + 4; l32i++) //水平9点滤波 #define WIDTH_RADIUS 4 //定义半径为4 pu8SrcRowH = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8SrcRowT = pu8SrcRowH + WIDTH_RADIUS; //Sum: -5,-4,-3,-2,-1,0,1,2,3 l32Temp2 = pu8SrcRowH[0] * 6 + pu8SrcRowH[1] + pu8SrcRowH[2] + pu8SrcRowH[3]; for(l32X = 0; l32X < WIDTH_RADIUS + 1; l32X++) { l32Temp2 -= *pu8SrcRowH; l32Temp2 += *pu8SrcRowT; pu8TempH[l32X] = l32Temp2 / (WIDTH_RADIUS * 2 + 1); pu8SrcRowT++; } for(; l32X < l32Width - (WIDTH_RADIUS + 1); l32X++) { l32Temp2 -= *pu8SrcRowH; l32Temp2 += *pu8SrcRowT; pu8TempH[l32X] = l32Temp2 / (WIDTH_RADIUS * 2 + 1); pu8SrcRowH++; pu8SrcRowT++; } for(; l32X < l32Width; l32X++) { l32Temp2 -= *pu8SrcRowH; l32Temp2 += *pu8SrcRowT; pu8TempH[l32X] = l32Temp2 / (WIDTH_RADIUS * 2 + 1); pu8SrcRowH++; } l32i0 = l32Y - (HEIGHT_RADIUS + 1); if(l32i0 < 0) { l32i0 = 0; } l32i1 = l32Y + HEIGHT_RADIUS; if(l32i1 > l32Height - 1) { l32i1 = l32Height - 1; } pu8SrcRowH = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32i0 * l32DesMapExtStride; pu8SrcRowT = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32i1 * l32DesMapExtStride; for(l32X = 0; l32X < l32Width; l32X++) { //垂直滤波 ps16SumTmp[l32X] -= pu8SrcRowH[l32X]; ps16SumTmp[l32X] += pu8SrcRowT[l32X]; l32Temp1 = ps16SumTmp[l32X]; pu8TempV[l32X] = l32Temp1 / (HEIGHT_RADIUS * 2 + 1); } DilateLine(pu8TempV, pu8Dst, l32Width, 1); for(l32X = EDGE_WIDTH; l32X < l32Width + EDGE_WIDTH; l32X++) { if(pu8Dst[l32X - EDGE_WIDTH] < pu8TempH[l32X - EDGE_WIDTH]) { ptLocLcMax->pu8EdgeDensMap2[l32X + l32Y * l32DesMapExtStride] = ptLocLcMax->pu8EdgeDensMap[l32X + l32Y * l32DesMapExtStride]; } else { ptLocLcMax->pu8EdgeDensMap2[l32X + l32Y * l32DesMapExtStride] = 0; } } //edge point is handled here for (l32X = 0; l32X < EDGE_WIDTH; l32X++) { ptLocLcMax->pu8EdgeDensMap2[l32X + l32Y * 
l32DesMapExtStride] = ptLocLcMax->pu8EdgeDensMap2[EDGE_WIDTH + l32Y * l32DesMapExtStride]; ptLocLcMax->pu8EdgeDensMap2[EDGE_WIDTH + l32Width + l32X + l32Y * l32DesMapExtStride] = ptLocLcMax->pu8EdgeDensMap2[EDGE_WIDTH + l32Width - 1 + l32Y * l32DesMapExtStride]; } } roughPart4(ptLocLcMax->pu8EdgeDensMap2, ptLocLcMax->pu8EdgeDensMapMoph, l32Width, l32Height, l32DesMapExtStride); l32Max = 0; for(l32Y = 0; l32Y < l32Height; l32Y++) { pu8Src = ptLocLcMax->pu8EdgeDensMap2 + l32Y * l32DesMapExtStride; for(l32X = EDGE_WIDTH; l32X < l32Width + EDGE_WIDTH; l32X++) { if(pu8Src[l32X] < 10) { pu8Src[l32X] = 0; } if(l32Max < pu8Src[l32X]) { l32Max = pu8Src[l32X]; } } for (l32X = 0; l32X < EDGE_WIDTH; l32X++) { pu8Src[l32X] = pu8Src[EDGE_WIDTH]; } for (l32X = l32Width + EDGE_WIDTH; l32X < l32Width + EDGE_WIDTH * 2; l32X++) { pu8Src[l32X] = pu8Src[l32Width + EDGE_WIDTH - 1]; } } if(l32Max < 0) { l32Max = 250; } roughPart6(ptLocLcMax->pu8EdgeDensMap2, ptLocLcMax->pu8EdgeDensMap, l32Max, l32Width, l32Height, l32DesMapExtStride); //下面是一个垂直平滑滤波,权值1 2 1 for(l32Y = 0; l32Y < l32Height; l32Y++) { // pu8Src = ptLocLcMax->pu8EdgeDensMap + l32DesMapExtStride + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8Src = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8Dst = ptLocLcMax->pu8EdgeDensMap2 + EDGE_WIDTH + l32Y * l32DesMapExtStride; if(l32Y == 0) { for(l32X = 0; l32X < l32Width; l32X++) { pu8Dst[l32X] = (pu8Src[l32X] + pu8Src[l32X + l32DesMapExtStride]) >> 1; } } else if(l32Y == l32Height - 1) { for(l32X=0; l32X<l32Width; l32X++) { pu8Dst[l32X] = (pu8Src[l32X] + pu8Src[l32X - l32DesMapExtStride]) >> 1; } } else { for(l32X = 0; l32X < l32Width; l32X++) { pu8Dst[l32X] = (pu8Src[l32X - l32DesMapExtStride] + 2*pu8Src[l32X] + pu8Src[l32X + l32DesMapExtStride]) >> 2; } } for(l32X = 0; l32X < l32Width; l32X++) { if(pu8Dst[l32X] < 10) { pu8Dst[l32X] = 0; } } } for(l32Y = 0; l32Y < l32Height; l32Y++) { memset(ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride, 0, l32Width); } memset(aLCMaxPos, 0, sizeof(aLCMaxPos)); for(l32Y = 1; l32Y < l32Height; l32Y++) { pu8Src = ptLocLcMax->pu8EdgeDensMap2 + EDGE_WIDTH + l32Y * l32DesMapExtStride; pu8Dst = ptLocLcMax->pu8EdgeDensMap + EDGE_WIDTH + l32Y * l32DesMapExtStride; l32PeakS = -1; l32Val = 0; for(l32X = 1; l32X < l32Width; l32X++) { if((pu8Src[l32X] < pu8Src[l32X-1]) || (l32X == (l32Width - 1))) { if(l32Val == 1) { if(l32PeakS < 0) l32PeakS = l32X - 1; l32MeetMax = 0; for(l32i = l32PeakS; l32i < l32X; l32i++) { if(pu8Src[l32i] >= pu8Src[l32i-l32DesMapExtStride] && pu8Src[l32i] >= pu8Src[l32i-l32DesMapExtStride-1] && pu8Src[l32i] >= pu8Src[l32i-l32DesMapExtStride+1] && pu8Src[l32i] >= pu8Src[l32i+l32DesMapExtStride] && pu8Src[l32i] >= pu8Src[l32i+l32DesMapExtStride-1] && pu8Src[l32i] >= pu8Src[l32i+l32DesMapExtStride+1]) { //pu8Dst[l32i] = 255; l32MeetMax = 1; } } //遇到局部最大值,此段内部中点被视为真实局部最大值 if(l32MeetMax) { l32LcX = (l32PeakS + l32X)/2; l32LcY = l32Y; if(pu8Src[l32LcX] > 0) { pu8Dst[l32LcX] = 255; //局部最大值点 for(l32i1 = 0;l32i1 < sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0]); l32i1++) { if(aLCMaxPos[l32i1].l32Val < pu8Src[l32LcX]) { //插入位置 break; } } if(l32i1 < sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0])) { //移动出空位 for(l32i = sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0])-1;l32i > l32i1; l32i--) { aLCMaxPos[l32i] = aLCMaxPos[l32i-1]; } aLCMaxPos[l32i1].l32PosX = l32X; aLCMaxPos[l32i1].l32PosY = l32LcY; aLCMaxPos[l32i1].l32Val = pu8Src[l32LcX]; } } } } l32PeakS = -1; l32Val = -1; } else if(pu8Src[l32X] == pu8Src[l32X - 1]) { if(l32PeakS < 0) { l32PeakS = l32X - 1; 
} } else if(pu8Src[l32X] > pu8Src[l32X - 1]) { l32Val = 1; //上升中 l32PeakS = -1; } } } l32RectID = 0; memset(ptLocLcMax->pu8EdgeDensMap2, 0, l32Width * l32Height); for(l32i = 0;l32i < sizeof(aLCMaxPos)/sizeof(aLCMaxPos[0]); l32i++) { l32X = aLCMaxPos[l32i].l32PosX; l32Y = aLCMaxPos[l32i].l32PosY; if(aLCMaxPos[l32i].l32Val == 0) { break; } l32Val = 10; //经过尝试,此方法稳定性最好 pu8Src = ptLocLcMax->pu8EdgeDensMapMoph + l32X; for(l32Ys = aLCMaxPos[l32i].l32PosY; l32Ys > aLCMaxPos[l32i].l32PosY - 3; l32Ys--) { if(pu8Src[l32Ys * l32DensMapStride] < l32Val) { break; } } for(l32Ye = aLCMaxPos[l32i].l32PosY; l32Ye < aLCMaxPos[l32i].l32PosY + 3; l32Ye++) { if(pu8Src[l32Ye*l32DensMapStride] < l32Val) { break; } } /* if(ptLocLcMax->l32UpsideDown == 0) { if(l32Y < l32Height/2) { l32Ys ++; } } else { // 目前输入yuv图像上面小,下面大 if(l32Y > l32Height/2) { l32Ys ++; } } */ //only the following will be exectued if(l32Y > l32Height/2) { l32Ys ++; } pu8Temp = (u8*)ptLocLcMax->pu8Temp; for(l32Xs = 0; l32Xs < l32Width; l32Xs++) { l32Val = 0; for(l32Y = l32Ys; l32Y < l32Ye; l32Y++) { if(l32Val < ptLocLcMax->pu8EdgeDensMapMoph[l32Y*l32DensMapStride + l32Xs]) { l32Val = ptLocLcMax->pu8EdgeDensMapMoph[l32Y*l32DensMapStride + l32Xs]; } } l32Val = l32Val * 3; if(l32Val > 255) l32Val = 255; pu8Temp[l32Xs] = l32Val; } l32Val = pu8Temp[l32X] * 40/100; pu8Src = pu8Temp; for(l32Xs = aLCMaxPos[l32i].l32PosX; l32Xs > aLCMaxPos[l32i].l32PosX - 11; l32Xs--) { if(pu8Src[l32Xs] <= l32Val) { break; } } for(l32Xe = aLCMaxPos[l32i].l32PosX; l32Xe < aLCMaxPos[l32i].l32PosX + 11; l32Xe++) { if(pu8Src[l32Xe] <= l32Val) { break; } } if((l32Ye <= l32Ys) || (l32Xe <= l32Xs)) { continue; } if(l32Xs < 0) l32Xs = 0; if(l32Ys < 0) l32Ys = 0; if(l32Xe > l32Width-1) l32Xe = l32Width-1; if(l32Ye > l32Height-1) l32Ye = l32Height-1; l32Xs0 = l32Xs; l32Xe0 = l32Xe; l32Ys0 = l32Ys; l32Ye0 = l32Ye; { l32Xs *= 8; l32Xe *= 8; l32Ys *= 4; l32Ye *= 4; } l32PlateHeight = (l32Ye-l32Ys); l32PlateWidth = (l32Xe-l32Xs); if(aLCMaxPos[l32i].l32Val > 0) { if(l32RectID < ptLocOutput->l32RectCount) { l32 l32W,l32H; l32W = l32Xe - l32Xs; l32H = l32Ye - l32Ys; ptLocOutput->ptRect[l32RectID].l32left = l32Xs; ptLocOutput->ptRect[l32RectID].l32right = l32Xe; ptLocOutput->ptRect[l32RectID].l32top = l32Ys; ptLocOutput->ptRect[l32RectID].l32bottom = l32Ye; ptLocOutput->ptRect[l32RectID].l32Valid = 1; l32RectID ++; } } } //最终返回区域个数 ptLocOutput->l32RectCount = l32RectID; return 0; } void *thread_func_cuda(void *arg) { struct _cuda_thread_arg *th_arg = (struct _cuda_thread_arg *)arg; TVehLprLcRect tRect[MAX_LOCNUM] = {0}; myTimer timer2; ptLocInput0.l32Width = th_arg->l32Width; ptLocInput0.l32Height = th_arg->l32Height; ptLocInput0.l32Stride = th_arg->l32Stride; ptImage0.l32Width = ptLocInput0.l32Width; ptImage0.l32Height = ptLocInput0.l32Height; while (1) { sem_wait(&sem_full); if (pu8CudaInputSrc == NULL) { printf("cuda exit successfully\n"); return NULL; } timer2.start(); //copy image source //memcpy(pu8CudaImgBuf, pu8CudaInputSrc, (ptLocInput0.l32Width * ptLocInput0.l32Height * 3) >> 1); checkCudaErrors(cudaMemcpy(pu8CudaImgBuf, pu8CudaInputSrc, (ptLocInput0.l32Width * ptLocInput0.l32Height * 3) >> 1, cudaMemcpyHostToDevice)); sem_post(&sem_empty); timer2.disp("copy in"); timer2.start(); //rough locate ptLocInput0.pu8SrcImg = pu8CudaImgBuf; ptLocOutput0.ptRect = tRect; ptLocOutput0.l32RectCount = MAX_LOCNUM; VehLprLocMaxProcess_cuda(&ptLocLcMax0, &ptLocInput0, &ptLocOutput0); //yuv2rgb ptImage0.pu8Y = pu8CudaImgBuf; ptImage0.pu8U = pu8CudaImgBuf + ptLocInput0.l32Width * 
ptLocInput0.l32Height; ptImage0.pu8V = pu8CudaImgBuf + (ptLocInput0.l32Width * ptLocInput0.l32Height * 5) / 4; YUV2RGB24Roi_cuda(&ptImage0, pu8CudaRGBBuf, ptLocInput0.l32Width * 3); //resize BilinearZoom_c_DownSample4x4GRAY_cuda(pu8CudaImgBuf, pu8CudaZoomBuf, ptLocInput0.l32Width, ptLocInput0.l32Height, ptLocInput0.l32Stride, SCALEWIDTH, SCALEHEIGHT, SCALEWIDTH, pu8CudaGrayBuf); TGuass2Model *ptGuassModel = (TGuass2Model*)pvCudaBgModel; BilinearZoom_c_cuda(pu8CudaGrayBuf,pu8CudaFrameCurBuf, SCALEWIDTH/4,SCALEHEIGHT/4,SCALEWIDTH/4, ptGuassModel->l32ImageWidth,ptGuassModel->l32ImageHeight,ptGuassModel->l32ImageWidth); //bgm int state = BGMGuassMogProcess_cuda(ptGuassModel,pu8CudaFrameCurBuf,pu8CudaFGFrameBuf); if(state!=EStatus_Success){ printf("BGMGuass ERROR!\n"); exit(EXIT_FAILURE); } timer2.disp("cuda process"); sem_wait(&sem_finish); timer2.start(); //rough output memcpy(&atCudaRoughRectsOut, ptLocOutput0.ptRect, sizeof(TVehLprLcRect) * MAX_LOCNUM); l32CudaRectCountOut = ptLocOutput0.l32RectCount; //yuv2rgb output memcpy(pu8CudaRGBOut, pu8CudaRGBBuf, ptLocInput0.l32Width * ptLocInput0.l32Height * 3); //resize output memcpy(pu8CudaZoomOut, pu8CudaZoomBuf, SCALEWIDTH * SCALEHEIGHT); memcpy(pu8CudaGrayOut, pu8CudaGrayBuf, SCALEWIDTH * SCALEHEIGHT); memcpy(pu8CudaFrameCurOut, pu8CudaFrameCurBuf, ptGuassModel->l32ImageWidth * ptGuassModel->l32ImageHeight); //bgm output memcpy(pu8CudaFGFrameOut, pu8CudaFGFrameBuf, ptGuassModel->l32ImageWidth * ptGuassModel->l32ImageHeight); timer2.disp("copy out"); sem_post(&sem_ready); } }
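A detail shared by BilinearZoom_CheckBoundary_Kernel and kerBilinear_downgray above: source coordinates are tracked in 16.16 fixed point, the integer part is recovered with >> 16, and only the top three fraction bits survive as an interpolation weight in 0..7 (with the complementary weight 8 - w). The vertical weights are additionally scaled by 1024, so the final (... + 0x8000) >> 16 divides by 8 * 8192 = 65536 with rounding. The host-side snippet below reproduces just that weight computation; the 640-to-480 column mapping is an arbitrary example, not a configuration used in this file.

// Illustration of the 16.16 fixed-point weights used by the zoom kernels above.
// Only the bit manipulation matches the kernels; the sizes are made up for the example.
#include <stdio.h>

int main(void)
{
    unsigned int u32SrcWidth = 640, u32DstWidth = 480;        /* arbitrary example sizes */
    unsigned int u32XStride   = ((u32SrcWidth - 1) << 16) / (u32DstWidth - 1);
    unsigned int u32XPosition = 7 * u32XStride;               /* 8th destination column  */

    unsigned int u32XSrc = u32XPosition >> 16;                /* integer source column        */
    unsigned int u32WX2  = (u32XPosition << 16) >> 29;        /* top 3 fraction bits, 0..7    */
    unsigned int u32WX1  = 8 - u32WX2;                        /* complementary weight, 1..8   */

    /* src[u32XSrc] is weighted by u32WX1/8 and src[u32XSrc + 1] by u32WX2/8 */
    printf("source column %u, weights %u/8 and %u/8\n", u32XSrc, u32WX1, u32WX2);
    return 0;
}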
345dece362dde6b80e339c7747c5b95ab9f0a45b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <malloc.h>
#include <iomanip>
#include <time.h>   // time() for the srand() seeding below

using namespace std;

#define REPEAT 1
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64

typedef unsigned long long Dtype;
//typedef double Dtype;
//typedef int Dtype;

__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi);

int gcf(int a, int b)
{
    if (a == 0) return b;
    return gcf(b % a, a);
}

int main(int argc, char* argv[])
{
    if (argc != 3) {
        std::cout << "Wrong number of arguments!! Exiting program !!!";
        return 0;
    }

    // struct timeval tv1, tv2;
    int N = atoi(argv[1]);
    int stride = atoi(argv[2]);
    unsigned long long *d_time, h_time;
    double tottime;
    Dtype *xj, *xi;
    Dtype *h_A, **d_A;
    int *d_N;
    std::ofstream fp;

    srand(time(NULL));
    fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/mThesis/exp5/data/result.txt", std::ofstream::app);

    h_A = (Dtype*)memalign(ALLIGNMENT, (N+2)*sizeof(Dtype));
    hipMalloc(&d_A, (N+2)*sizeof(Dtype));
    hipMalloc(&d_time, sizeof(unsigned long long));
    hipMalloc(&xj, sizeof(Dtype));
    hipMalloc(&xi, sizeof(Dtype));
    hipMalloc(&d_N, sizeof(int));

    //int step = gcf (STRIDE, N);
    // each element stores the device address of the element 'stride' slots ahead (pointer ring)
    for (unsigned int i = 0; i < N; i++) {
        // stride = rand()%20;
        h_A[i] = ((Dtype)(uintptr_t)d_A) + ((i + stride) % N) * sizeof(Dtype);
        // h_A[i] = i+1;
    }
    h_A[N] = 0.0;
    h_A[N+1] = 0.0;

    tottime = 0.0;
    for (int i = 0; i < 10; i++) {
        dim3 Db = dim3(1);
        dim3 Dg = dim3(1,1,1);
        hipMemcpy(d_A, h_A, (N+2)*sizeof(Dtype), hipMemcpyHostToDevice);
        hipMemcpy(d_N, &N, sizeof(int), hipMemcpyHostToDevice);
        hipLaunchKernelGGL((VecAdd), dim3(Db), dim3(Dg), 0, 0, d_A, d_N, d_time, xj, xi);
        hipDeviceSynchronize();
        hipMemcpy(&h_time, d_time, sizeof(unsigned long long), hipMemcpyDeviceToHost);
        // cout << h_time << endl;
        tottime += (double)h_time;
    }

    // cout << std::fixed << std::setprecision(6) << tottime << std::endl;
    tottime = tottime / (2560.0 * N);
    fp << N*8.0/1024.0 << " " << tottime << std::endl;
    // std::cout << std::setprecision(2);
    cout << std::fixed << std::setprecision(6) << tottime << std::endl;

    for (int i = 0; i < N; i++) {
        //printf("%f ",(h_A[i]));
    }

    hipFree(d_A);
    hipFree(d_time);
    free(h_A);
    fp.close();
}
345dece362dde6b80e339c7747c5b95ab9f0a45b.cu
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <malloc.h>
#include <iomanip>
#include <time.h>   // time() for the srand() seeding below

using namespace std;

#define REPEAT 1
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64

typedef unsigned long long Dtype;
//typedef double Dtype;
//typedef int Dtype;

__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi);

int gcf(int a, int b)
{
    if (a == 0) return b;
    return gcf(b % a, a);
}

int main(int argc, char* argv[])
{
    if (argc != 3) {
        std::cout << "Wrong number of arguments!! Exiting program !!!";
        return 0;
    }

    // struct timeval tv1, tv2;
    int N = atoi(argv[1]);
    int stride = atoi(argv[2]);
    unsigned long long *d_time, h_time;
    double tottime;
    Dtype *xj, *xi;
    Dtype *h_A, **d_A;
    int *d_N;
    std::ofstream fp;

    srand(time(NULL));
    fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/mThesis/exp5/data/result.txt", std::ofstream::app);

    h_A = (Dtype*)memalign(ALLIGNMENT, (N+2)*sizeof(Dtype));
    cudaMalloc(&d_A, (N+2)*sizeof(Dtype));
    cudaMalloc(&d_time, sizeof(unsigned long long));
    cudaMalloc(&xj, sizeof(Dtype));
    cudaMalloc(&xi, sizeof(Dtype));
    cudaMalloc(&d_N, sizeof(int));

    //int step = gcf (STRIDE, N);
    // each element stores the device address of the element 'stride' slots ahead (pointer ring)
    for (unsigned int i = 0; i < N; i++) {
        // stride = rand()%20;
        h_A[i] = ((Dtype)(uintptr_t)d_A) + ((i + stride) % N) * sizeof(Dtype);
        // h_A[i] = i+1;
    }
    h_A[N] = 0.0;
    h_A[N+1] = 0.0;

    tottime = 0.0;
    for (int i = 0; i < 10; i++) {
        dim3 Db = dim3(1);
        dim3 Dg = dim3(1,1,1);
        cudaMemcpy(d_A, h_A, (N+2)*sizeof(Dtype), cudaMemcpyHostToDevice);
        cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice);
        VecAdd<<<Db,Dg>>>(d_A, d_N, d_time, xj, xi);
        cudaDeviceSynchronize();
        cudaMemcpy(&h_time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
        // cout << h_time << endl;
        tottime += (double)h_time;
    }

    // cout << std::fixed << std::setprecision(6) << tottime << std::endl;
    tottime = tottime / (2560.0 * N);
    fp << N*8.0/1024.0 << " " << tottime << std::endl;
    // std::cout << std::setprecision(2);
    cout << std::fixed << std::setprecision(6) << tottime << std::endl;

    for (int i = 0; i < N; i++) {
        //printf("%f ",(h_A[i]));
    }

    cudaFree(d_A);
    cudaFree(d_time);
    free(h_A);
    fp.close();
}
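Neither file in this pair defines VecAdd; only the forward declaration is present, and the definition evidently lives in a separate translation unit. For orientation, a pointer-chasing kernel consistent with how main() builds d_A (element i stores the device address of element (i + stride) % N) and reads back a single cycle count could look like the sketch below. This is an illustrative assumption, not the author's kernel; in particular the 256-iteration repeat count is only a guess at where the tottime / (2560.0 * N) normalisation (10 host iterations times 256 times N accesses) might come from.

// Hypothetical definition of VecAdd (sketch; the real kernel is in another source file).
#include <stdint.h>

typedef unsigned long long Dtype;

__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi)
{
    Dtype* p = (Dtype*)A;                       // first element of the pointer ring
    int n = *N;
    unsigned long long start = clock64();
    for (int r = 0; r < 256; ++r) {             // repeat count is an assumption, see note above
        for (int i = 0; i < n; ++i) {
            p = (Dtype*)(uintptr_t)(*p);        // dependent (serialized) global load
        }
    }
    unsigned long long stop = clock64();
    *xi = (Dtype)(uintptr_t)p;                  // sink: keeps the chase from being optimised away
    *xj = *xi;
    *d_time = stop - start;                     // total cycles for the traversal
}

The host launches this with a single block of a single thread (Db = dim3(1), Dg = dim3(1,1,1)), which is the usual setup for measuring pure memory latency with a dependent-load chain.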
50742cc4327dc75fdc1392e5d73a2ebafce92a87.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void leftPackingKernel(double* temperature, double* ghost, int block_size)
{
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    if (j < block_size) {
        ghost[j] = temperature[(block_size + 2) * (1 + j) + 1];
    }
}
50742cc4327dc75fdc1392e5d73a2ebafce92a87.cu
#include "includes.h" __global__ void leftPackingKernel(double* temperature, double* ghost, int block_size) { int j = blockDim.x * blockIdx.x + threadIdx.x; if (j < block_size) { ghost[j] = temperature[(block_size + 2) * (1 + j) + 1]; } }
7368bc8cb9057170f60b3c9cdde844ecbd6c5cd7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "reader_impl.hpp" #include <io/comp/nvcomp_adapter.hpp> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/iterator_categories.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #include <numeric> namespace cudf::io::detail::parquet { namespace { /** * @brief Generate depth remappings for repetition and definition levels. * * When dealing with columns that contain lists, we must examine incoming * repetition and definition level pairs to determine what range of output nesting * is indicated when adding new values. 
This function generates the mappings of * the R/D levels to those start/end bounds * * @param remap Maps column schema index to the R/D remapping vectors for that column * @param src_col_schema The column schema to generate the new mapping for * @param md File metadata information */ void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap, int src_col_schema, aggregate_reader_metadata const& md) { // already generated for this level if (remap.find(src_col_schema) != remap.end()) { return; } auto schema = md.get_schema(src_col_schema); int max_depth = md.get_output_nesting_depth(src_col_schema); CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(), "Attempting to remap a schema more than once"); auto inserted = remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}}); auto& depth_remap = inserted.first->second; std::vector<int>& rep_depth_remap = (depth_remap.first); rep_depth_remap.resize(schema.max_repetition_level + 1); std::vector<int>& def_depth_remap = (depth_remap.second); def_depth_remap.resize(schema.max_definition_level + 1); // the key: // for incoming level values R/D // add values starting at the shallowest nesting level X has repetition level R // until you reach the deepest nesting level Y that corresponds to the repetition level R1 // held by the nesting level that has definition level D // // Example: a 3 level struct with a list at the bottom // // R / D Depth // level0 0 / 1 0 // level1 0 / 2 1 // level2 0 / 3 2 // list 0 / 3 3 // element 1 / 4 4 // // incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0) // incoming R/D : 0, 1 -> add values from depth 0 to 3 // incoming R/D : 0, 2 -> add values from depth 0 to 3 // incoming R/D : 1, 4 -> add values from depth 4 to 4 // // Note : the -validity- of values is simply checked by comparing the incoming D value against the // D value of the given nesting level (incoming D >= the D for the nesting level == valid, // otherwise NULL). The tricky part is determining what nesting levels to add values at. // // For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting // depth. // // compute "X" from above for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) { auto find_shallowest = [&](int r) { int shallowest = -1; int cur_depth = max_depth - 1; int schema_idx = src_col_schema; while (schema_idx > 0) { auto cur_schema = md.get_schema(schema_idx); if (cur_schema.max_repetition_level == r) { // if this is a repeated field, map it one level deeper shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth; } // if it's one-level encoding list else if (cur_schema.is_one_level_list(md.get_schema(cur_schema.parent_idx))) { shallowest = cur_depth - 1; } if (!cur_schema.is_stub()) { cur_depth--; } schema_idx = cur_schema.parent_idx; } return shallowest; }; rep_depth_remap[s_idx] = find_shallowest(s_idx); } // compute "Y" from above for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) { auto find_deepest = [&](int d) { SchemaElement prev_schema; int schema_idx = src_col_schema; int r1 = 0; while (schema_idx > 0) { SchemaElement cur_schema = md.get_schema(schema_idx); if (cur_schema.max_definition_level == d) { // if this is a repeated field, map it one level deeper r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level : cur_schema.max_repetition_level; break; } prev_schema = cur_schema; schema_idx = cur_schema.parent_idx; } // we now know R1 from above. 
return the deepest nesting level that has the // same repetition level schema_idx = src_col_schema; int depth = max_depth - 1; while (schema_idx > 0) { SchemaElement cur_schema = md.get_schema(schema_idx); if (cur_schema.max_repetition_level == r1) { // if this is a repeated field, map it one level deeper depth = cur_schema.is_stub() ? depth + 1 : depth; break; } if (!cur_schema.is_stub()) { depth--; } prev_schema = cur_schema; schema_idx = cur_schema.parent_idx; } return depth; }; def_depth_remap[s_idx] = find_deepest(s_idx); } } /** * @brief Return the required number of bits to store a value. */ template <typename T = uint8_t> [[nodiscard]] T required_bits(uint32_t max_level) { return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level)); } /** * @brief Converts cuDF units to Parquet units. * * @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type. */ [[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id, type_id timestamp_type_id, parquet::Type physical, int8_t converted, int32_t length) { int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0; int32_t clock_rate = 0; if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) { type_width = 1; // I32 -> I8 } else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) { type_width = 2; // I32 -> I16 } else if (column_type_id == type_id::INT32) { type_width = 4; // str -> hash32 } else if (is_chrono(data_type{column_type_id})) { clock_rate = to_clockrate(timestamp_type_id); } int8_t converted_type = converted; if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 && not cudf::is_fixed_point(data_type{column_type_id})) { converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal } return std::make_tuple(type_width, clock_rate, converted_type); } /** * @brief Reads compressed page data to device memory. 
* * @param sources Dataset sources * @param page_data Buffers to hold compressed page data for each chunk * @param chunks List of column chunk descriptors * @param begin_chunk Index of first column chunk to read * @param end_chunk Index after the last column chunk to read * @param column_chunk_offsets File offset for all chunks * @param chunk_source_map Association between each column chunk and its source * @param stream CUDA stream used for device memory operations and kernel launches * * @return A future object for reading synchronization */ [[nodiscard]] std::future<void> read_column_chunks_async( std::vector<std::unique_ptr<datasource>> const& sources, std::vector<std::unique_ptr<datasource::buffer>>& page_data, hostdevice_vector<gpu::ColumnChunkDesc>& chunks, size_t begin_chunk, size_t end_chunk, const std::vector<size_t>& column_chunk_offsets, std::vector<size_type> const& chunk_source_map, rmm::cuda_stream_view stream) { // Transfer chunk data, coalescing adjacent chunks std::vector<std::future<size_t>> read_tasks; for (size_t chunk = begin_chunk; chunk < end_chunk;) { const size_t io_offset = column_chunk_offsets[chunk]; size_t io_size = chunks[chunk].compressed_size; size_t next_chunk = chunk + 1; const bool is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED); while (next_chunk < end_chunk) { const size_t next_offset = column_chunk_offsets[next_chunk]; const bool is_next_compressed = (chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED); if (next_offset != io_offset + io_size || is_next_compressed != is_compressed) { // Can't merge if not contiguous or mixing compressed and uncompressed // Not coalescing uncompressed with compressed chunks is so that compressed buffers can be // freed earlier (immediately after decompression stage) to limit peak memory requirements break; } io_size += chunks[next_chunk].compressed_size; next_chunk++; } if (io_size != 0) { auto& source = sources[chunk_source_map[chunk]]; if (source->is_device_read_preferred(io_size)) { auto buffer = rmm::device_buffer(io_size, stream); auto fut_read_size = source->device_read_async( io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream); read_tasks.emplace_back(std::move(fut_read_size)); page_data[chunk] = datasource::buffer::create(std::move(buffer)); } else { auto const buffer = source->host_read(io_offset, io_size); page_data[chunk] = datasource::buffer::create(rmm::device_buffer(buffer->data(), buffer->size(), stream)); } auto d_compdata = page_data[chunk]->data(); do { chunks[chunk].compressed_data = d_compdata; d_compdata += chunks[chunk].compressed_size; } while (++chunk != next_chunk); } else { chunk = next_chunk; } } auto sync_fn = [](decltype(read_tasks) read_tasks) { for (auto& task : read_tasks) { task.wait(); } }; return std::async(std::launch::deferred, sync_fn, std::move(read_tasks)); } /** * @brief Return the number of total pages from the given column chunks. 
* * @param chunks List of column chunk descriptors * @param stream CUDA stream used for device memory operations and kernel launches * * @return The total number of pages */ [[nodiscard]] size_t count_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks, rmm::cuda_stream_view stream) { size_t total_pages = 0; chunks.host_to_device(stream); gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); chunks.device_to_host(stream, true); for (size_t c = 0; c < chunks.size(); c++) { total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages; } return total_pages; } // see setupLocalPageInfo() in page_data.cu for supported page encodings constexpr bool is_supported_encoding(Encoding enc) { switch (enc) { case Encoding::PLAIN: case Encoding::PLAIN_DICTIONARY: case Encoding::RLE: case Encoding::RLE_DICTIONARY: return true; default: return false; } } /** * @brief Decode the page information from the given column chunks. * * @param chunks List of column chunk descriptors * @param pages List of page information * @param stream CUDA stream used for device memory operations and kernel launches * @returns The size in bytes of level type data required */ int decode_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks, hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view stream) { // IMPORTANT : if you change how pages are stored within a chunk (dist pages, then data pages), // please update preprocess_nested_columns to reflect this. for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } chunks.host_to_device(stream); gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); // compute max bytes needed for level data auto level_bit_size = cudf::detail::make_counting_transform_iterator(0, [chunks = chunks.begin()] __device__(int i) { auto c = chunks[i]; return static_cast<int>( max(c.level_bits[gpu::level_type::REPETITION], c.level_bits[gpu::level_type::DEFINITION])); }); // max level data bit size. int const max_level_bits = thrust::reduce(rmm::exec_policy(stream), level_bit_size, level_bit_size + chunks.size(), 0, thrust::maximum<int>()); auto const level_type_size = ::max(1, cudf::util::div_rounding_up_safe(max_level_bits, 8)); pages.device_to_host(stream, true); // validate page encodings CUDF_EXPECTS(std::all_of(pages.begin(), pages.end(), [](auto const& page) { return is_supported_encoding(page.encoding); }), "Unsupported page encoding detected"); return level_type_size; } /** * @brief Decompresses the page data, at page granularity. 
* * @param chunks List of column chunk descriptors * @param pages List of page information * @param stream CUDA stream used for device memory operations and kernel launches * * @return Device buffer to decompressed page data */ [[nodiscard]] rmm::device_buffer decompress_page_data( hostdevice_vector<gpu::ColumnChunkDesc>& chunks, hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view stream) { auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)>& f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_buffer debrotli_scratch; // Count the exact number of compressed pages size_t num_comp_pages = 0; size_t total_decomp_size = 0; struct codec_stats { parquet::Compression compression_type = UNCOMPRESSED; size_t num_pages = 0; int32_t max_decompressed_size = 0; size_t total_decomp_size = 0; }; std::array codecs{codec_stats{parquet::GZIP}, codec_stats{parquet::SNAPPY}, codec_stats{parquet::BROTLI}, codec_stats{parquet::ZSTD}}; auto is_codec_supported = [&codecs](int8_t codec) { if (codec == parquet::UNCOMPRESSED) return true; return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) { return codec == cstats.compression_type; }) != codecs.end(); }; CUDF_EXPECTS(std::all_of(chunks.begin(), chunks.end(), [&is_codec_supported](auto const& chunk) { return is_codec_supported(chunk.codec); }), "Unsupported compression type"); for (auto& codec : codecs) { for_each_codec_page(codec.compression_type, [&](size_t page) { auto page_uncomp_size = pages[page].uncompressed_page_size; total_decomp_size += page_uncomp_size; codec.total_decomp_size += page_uncomp_size; codec.max_decompressed_size = ::max(codec.max_decompressed_size, page_uncomp_size); codec.num_pages++; num_comp_pages++; }); if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) { debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream); } } // Dispatch batches of pages to decompress for each codec rmm::device_buffer decomp_pages(total_decomp_size, stream); std::vector<device_span<uint8_t const>> comp_in; comp_in.reserve(num_comp_pages); std::vector<device_span<uint8_t>> comp_out; comp_out.reserve(num_comp_pages); // vectors to save v2 def and rep level data, if any std::vector<device_span<uint8_t const>> copy_in; copy_in.reserve(num_comp_pages); std::vector<device_span<uint8_t>> copy_out; copy_out.reserve(num_comp_pages); rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream); thrust::fill(rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), compression_result{0, compression_status::FAILURE}); size_t decomp_offset = 0; int32_t start_pos = 0; for (const auto& codec : codecs) { if (codec.num_pages == 0) { continue; } for_each_codec_page(codec.compression_type, [&](size_t page_idx) { auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset; auto& page = pages[page_idx]; // offset will only be non-zero for V2 pages auto const offset = page.def_lvl_bytes + page.rep_lvl_bytes; // for V2 need to copy def and rep level info into place, and then offset the // input and output buffers. otherwise we'd have to keep both the compressed // and decompressed data. 
if (offset != 0) { copy_in.emplace_back(page.page_data, offset); copy_out.emplace_back(dst_base, offset); } comp_in.emplace_back(page.page_data + offset, static_cast<size_t>(page.compressed_page_size - offset)); comp_out.emplace_back(dst_base + offset, static_cast<size_t>(page.uncompressed_page_size - offset)); page.page_data = dst_base; decomp_offset += page.uncompressed_page_size; }); host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos, codec.num_pages}; auto const d_comp_in = cudf::detail::make_device_uvector_async( comp_in_view, stream, rmm::mr::get_current_device_resource()); host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos, codec.num_pages); auto const d_comp_out = cudf::detail::make_device_uvector_async( comp_out_view, stream, rmm::mr::get_current_device_resource()); device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages); switch (codec.compression_type) { case parquet::GZIP: gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream); break; case parquet::SNAPPY: if (nvcomp_integration::is_stable_enabled()) { nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY, d_comp_in, d_comp_out, d_comp_res_view, codec.max_decompressed_size, codec.total_decomp_size, stream); } else { gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream); } break; case parquet::ZSTD: nvcomp::batched_decompress(nvcomp::compression_type::ZSTD, d_comp_in, d_comp_out, d_comp_res_view, codec.max_decompressed_size, codec.total_decomp_size, stream); break; case parquet::BROTLI: gpu_debrotli(d_comp_in, d_comp_out, d_comp_res_view, debrotli_scratch.data(), debrotli_scratch.size(), stream); break; default: CUDF_FAIL("Unexpected decompression dispatch"); break; } start_pos += codec.num_pages; } CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), [] __device__(auto const& res) { return res.status == compression_status::SUCCESS; }), "Error during decompression"); // now copy the uncompressed V2 def and rep level data if (not copy_in.empty()) { auto const d_copy_in = cudf::detail::make_device_uvector_async( copy_in, stream, rmm::mr::get_current_device_resource()); auto const d_copy_out = cudf::detail::make_device_uvector_async( copy_out, stream, rmm::mr::get_current_device_resource()); gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream); stream.synchronize(); } // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer pages.host_to_device(stream); return decomp_pages; } } // namespace void reader::impl::allocate_nesting_info() { auto const& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; auto& page_nesting_info = _file_itm_data.page_nesting_info; auto& page_nesting_decode_info = _file_itm_data.page_nesting_decode_info; // compute total # of page_nesting infos needed and allocate space. 
doing this in one // buffer to keep it to a single gpu allocation size_t const total_page_nesting_infos = std::accumulate( chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) { // the schema of the input column auto const& schema = _metadata->get_schema(chunk.src_col_schema); auto const per_page_nesting_info_size = max( schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema)); return total + (per_page_nesting_info_size * chunk.num_data_pages); }); page_nesting_info = hostdevice_vector<gpu::PageNestingInfo>{total_page_nesting_infos, _stream}; page_nesting_decode_info = hostdevice_vector<gpu::PageNestingDecodeInfo>{total_page_nesting_infos, _stream}; // update pointers in the PageInfos int target_page_index = 0; int src_info_index = 0; for (size_t idx = 0; idx < chunks.size(); idx++) { int src_col_schema = chunks[idx].src_col_schema; auto& schema = _metadata->get_schema(src_col_schema); auto const per_page_nesting_info_size = ::max( schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema)); // skip my dict pages target_page_index += chunks[idx].num_dict_pages; for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index; pages[target_page_index + p_idx].nesting_decode = page_nesting_decode_info.device_ptr() + src_info_index; pages[target_page_index + p_idx].nesting_info_size = per_page_nesting_info_size; pages[target_page_index + p_idx].num_output_nesting_levels = _metadata->get_output_nesting_depth(src_col_schema); src_info_index += per_page_nesting_info_size; } target_page_index += chunks[idx].num_data_pages; } // fill in int nesting_info_index = 0; std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping; for (size_t idx = 0; idx < chunks.size(); idx++) { int src_col_schema = chunks[idx].src_col_schema; // schema of the input column auto& schema = _metadata->get_schema(src_col_schema); // real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc) int max_depth = _metadata->get_output_nesting_depth(src_col_schema); // # of nesting infos stored per page for this column auto const per_page_nesting_info_size = ::max(schema.max_definition_level + 1, max_depth); // if this column has lists, generate depth remapping std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping; if (schema.max_repetition_level > 0) { generate_depth_remappings(depth_remapping, src_col_schema, *_metadata); } // fill in host-side nesting info int schema_idx = src_col_schema; auto cur_schema = _metadata->get_schema(schema_idx); int cur_depth = max_depth - 1; while (schema_idx > 0) { // stub columns (basically the inner field of a list scheme element) are not real columns. 
// we can ignore them for the purposes of output nesting info if (!cur_schema.is_stub()) { // initialize each page within the chunk for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { gpu::PageNestingInfo* pni = &page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; gpu::PageNestingDecodeInfo* nesting_info = &page_nesting_decode_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; // if we have lists, set our start and end depth remappings if (schema.max_repetition_level > 0) { auto remap = depth_remapping.find(src_col_schema); CUDF_EXPECTS(remap != depth_remapping.end(), "Could not find depth remapping for schema"); std::vector<int> const& rep_depth_remap = (remap->second.first); std::vector<int> const& def_depth_remap = (remap->second.second); for (size_t m = 0; m < rep_depth_remap.size(); m++) { nesting_info[m].start_depth = rep_depth_remap[m]; } for (size_t m = 0; m < def_depth_remap.size(); m++) { nesting_info[m].end_depth = def_depth_remap[m]; } } // values indexed by output column index nesting_info[cur_depth].max_def_level = cur_schema.max_definition_level; pni[cur_depth].size = 0; pni[cur_depth].type = to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id()); pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL; } // move up the hierarchy cur_depth--; } // next schema schema_idx = cur_schema.parent_idx; cur_schema = _metadata->get_schema(schema_idx); } nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages); } // copy nesting info to the device page_nesting_info.host_to_device(_stream); page_nesting_decode_info.host_to_device(_stream); } void reader::impl::allocate_level_decode_space() { auto& pages = _file_itm_data.pages_info; // TODO: this could be made smaller if we ignored dictionary pages and pages with no // repetition data. 
size_t const per_page_decode_buf_size = LEVEL_DECODE_BUF_SIZE * 2 * _file_itm_data.level_type_size; auto const decode_buf_size = per_page_decode_buf_size * pages.size(); _file_itm_data.level_decode_data = rmm::device_buffer(decode_buf_size, _stream, rmm::mr::get_current_device_resource()); // distribute the buffers uint8_t* buf = static_cast<uint8_t*>(_file_itm_data.level_decode_data.data()); for (size_t idx = 0; idx < pages.size(); idx++) { auto& p = pages[idx]; p.lvl_decode_buf[gpu::level_type::DEFINITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size); p.lvl_decode_buf[gpu::level_type::REPETITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size); } } std::pair<bool, std::vector<std::future<void>>> reader::impl::create_and_read_column_chunks( cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows) { auto& raw_page_data = _file_itm_data.raw_page_data; auto& chunks = _file_itm_data.chunks; // Descriptors for all the chunks that make up the selected columns const auto num_input_columns = _input_columns.size(); const auto num_chunks = row_groups_info.size() * num_input_columns; chunks = hostdevice_vector<gpu::ColumnChunkDesc>(0, num_chunks, _stream); // Association between each column chunk and its source std::vector<size_type> chunk_source_map(num_chunks); // Tracker for eventually deallocating compressed and uncompressed data raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks); // Keep track of column chunk file offsets std::vector<size_t> column_chunk_offsets(num_chunks); // Initialize column chunk information size_t total_decompressed_size = 0; auto remaining_rows = num_rows; std::vector<std::future<void>> read_rowgroup_tasks; for (const auto& rg : row_groups_info) { const auto& row_group = _metadata->get_row_group(rg.index, rg.source_index); auto const row_group_start = rg.start_row; auto const row_group_source = rg.source_index; auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows); // generate ColumnChunkDesc objects for everything to be decoded (all input columns) for (size_t i = 0; i < num_input_columns; ++i) { auto col = _input_columns[i]; // look up metadata auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx); auto& schema = _metadata->get_schema(col.schema_idx); auto [type_width, clock_rate, converted_type] = conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()), _timestamp_type.id(), schema.type, schema.converted_type, schema.type_length); column_chunk_offsets[chunks.size()] = (col_meta.dictionary_page_offset != 0) ? 
::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size, nullptr, col_meta.num_values, schema.type, type_width, row_group_start, row_group_rows, schema.max_definition_level, schema.max_repetition_level, _metadata->get_output_nesting_depth(col.schema_idx), required_bits(schema.max_definition_level), required_bits(schema.max_repetition_level), col_meta.codec, converted_type, schema.logical_type, schema.decimal_precision, clock_rate, i, col.schema_idx)); // Map each column chunk to its column index and its source index chunk_source_map[chunks.size() - 1] = row_group_source; if (col_meta.codec != Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } } remaining_rows -= row_group_rows; } // Read compressed chunk data to device memory read_rowgroup_tasks.push_back(read_column_chunks_async(_sources, raw_page_data, chunks, 0, chunks.size(), column_chunk_offsets, chunk_source_map, _stream)); CUDF_EXPECTS(remaining_rows == 0, "All rows data must be read."); return {total_decompressed_size > 0, std::move(read_rowgroup_tasks)}; } void reader::impl::load_and_decompress_data( cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows) { // This function should never be called if `num_rows == 0`. CUDF_EXPECTS(num_rows > 0, "Number of reading rows must not be zero."); auto& raw_page_data = _file_itm_data.raw_page_data; auto& decomp_page_data = _file_itm_data.decomp_page_data; auto& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; auto const [has_compressed_data, read_rowgroup_tasks] = create_and_read_column_chunks(row_groups_info, num_rows); for (auto& task : read_rowgroup_tasks) { task.wait(); } // Process dataset chunk pages into output columns auto const total_pages = count_page_headers(chunks, _stream); pages = hostdevice_vector<gpu::PageInfo>(total_pages, total_pages, _stream); if (total_pages > 0) { // decoding of column/page information _file_itm_data.level_type_size = decode_page_headers(chunks, pages, _stream); if (has_compressed_data) { decomp_page_data = decompress_page_data(chunks, pages, _stream); // Free compressed data for (size_t c = 0; c < chunks.size(); c++) { if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } } } // build output column info // walk the schema, building out_buffers that mirror what our final cudf columns will look // like. important : there is not necessarily a 1:1 mapping between input columns and output // columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct // columns. The "structiness" is simply implied by the schema. For example, this schema: // required group field_id=1 name { // required binary field_id=2 firstname (String); // required binary field_id=3 middlename (String); // required binary field_id=4 lastname (String); // } // will only contain 3 columns of data (firstname, middlename, lastname). But of course // "name" is a struct column that we want to return, so we have to make sure that we // create it ourselves. 
// std::vector<output_column_info> output_info = build_output_column_info(); // the following two allocate functions modify the page data pages.device_to_host(_stream, true); { // nesting information (sizes, etc) stored -per page- // note : even for flat schemas, we allocate 1 level of "nesting" info allocate_nesting_info(); // level decode space allocate_level_decode_space(); } pages.host_to_device(_stream); } } namespace { struct cumulative_row_info { size_t row_count; // cumulative row count size_t size_bytes; // cumulative size in bytes int key; // schema index }; #if defined(PREPROCESS_DEBUG) void print_pages(hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view _stream) { pages.device_to_host(_stream, true); for (size_t idx = 0; idx < pages.size(); idx++) { auto const& p = pages[idx]; // skip dictionary pages if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; } printf( "P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d), " "str_bytes(%d)\n", idx, p.src_col_schema, p.chunk_row, p.num_rows, p.skipped_values, p.skipped_leaf_values, p.str_bytes); } } void print_cumulative_page_info(hostdevice_vector<gpu::PageInfo>& pages, rmm::device_uvector<int32_t> const& page_index, rmm::device_uvector<cumulative_row_info> const& c_info, rmm::cuda_stream_view stream) { pages.device_to_host(stream, true); printf("------------\nCumulative sizes by page\n"); std::vector<int> schemas(pages.size()); std::vector<int> h_page_index(pages.size()); CUDF_CUDA_TRY(hipMemcpy( h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), hipMemcpyDefault)); std::vector<cumulative_row_info> h_cinfo(pages.size()); CUDF_CUDA_TRY(hipMemcpy( h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), hipMemcpyDefault)); auto schema_iter = cudf::detail::make_counting_transform_iterator( 0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; }); thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin()); auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end()); schemas.resize(last - schemas.begin()); printf("Num schemas: %lu\n", schemas.size()); for (size_t idx = 0; idx < schemas.size(); idx++) { printf("Schema %d\n", schemas[idx]); for (size_t pidx = 0; pidx < pages.size(); pidx++) { auto const& page = pages[h_page_index[pidx]]; if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) { continue; } printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes); } } } void print_cumulative_row_info( host_span<cumulative_row_info const> sizes, std::string const& label, std::optional<std::vector<gpu::chunk_read_info>> splits = std::nullopt) { if (splits.has_value()) { printf("------------\nSplits\n"); for (size_t idx = 0; idx < splits->size(); idx++) { printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows); } } printf("------------\nCumulative sizes %s\n", label.c_str()); for (size_t idx = 0; idx < sizes.size(); idx++) { printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key); if (splits.has_value()) { // if we have a split at this row count and this is the last instance of this row count auto start = thrust::make_transform_iterator( splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; }); auto end = start + splits->size(); auto split = std::find(start, end, sizes[idx].row_count); auto const split_index = [&]() -> int { if (split != end && ((idx == sizes.size() - 1) || 
(sizes[idx + 1].row_count > sizes[idx].row_count))) { return static_cast<int>(std::distance(start, split)); } return idx == 0 ? 0 : -1; }(); if (split_index >= 0) { printf(" <-- split {%lu, %lu}", splits.value()[split_index].skip_rows, splits.value()[split_index].num_rows); } } printf("\n"); } } #endif // PREPROCESS_DEBUG /** * @brief Functor which reduces two cumulative_row_info structs of the same key. */ struct cumulative_row_sum { cumulative_row_info operator() __device__(cumulative_row_info const& a, cumulative_row_info const& b) const { return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key}; } }; /** * @brief Functor which computes the total data size for a given type of cudf column. * * In the case of strings, the return size does not include the chars themselves. That * information is tracked separately (see PageInfo::str_bytes). */ struct row_size_functor { __device__ size_t validity_size(size_t num_rows, bool nullable) { return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0; } template <typename T> __device__ size_t operator()(size_t num_rows, bool nullable) { auto const element_size = sizeof(device_storage_type_t<T>); return (element_size * num_rows) + validity_size(num_rows, nullable); } }; template <> __device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable) { auto const offset_size = sizeof(offset_type); // NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset // for the entire column, whereas this is adding an extra offset per page. So we will get a // small over-estimate of the real size of the order : # of pages * 4 bytes. It seems better // to overestimate size somewhat than to underestimate it and potentially generate chunks // that are too large. return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); } template <> __device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable) { return validity_size(num_rows, nullable); } template <> __device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable) { // only returns the size of offsets and validity. the size of the actual string chars // is tracked separately. auto const offset_size = sizeof(offset_type); // see note about offsets in the list_view template. return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); } /** * @brief Functor which computes the total output cudf data size for all of * the data in this page. * * Sums across all nesting levels. */ struct get_cumulative_row_info { gpu::PageInfo const* const pages; __device__ cumulative_row_info operator()(size_type index) { auto const& page = pages[index]; if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { return cumulative_row_info{0, 0, page.src_col_schema}; } // total nested size, not counting string data auto iter = cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) { auto const& pni = page.nesting[i]; return cudf::type_dispatcher( data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable); }); size_t const row_count = static_cast<size_t>(page.nesting[0].size); return { row_count, thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes, page.src_col_schema}; } }; /** * @brief Functor which computes the effective size of all input columns by page. * * For a given row, we want to find the cost of all pages for all columns involved * in loading up to that row. 
The complication here is that not all pages are the * same size between columns. Example: * * page row counts * Column A: 0 <----> 100 <----> 200 * Column B: 0 <---------------> 200 <--------> 400 | * if we decide to split at row 100, we don't really know the actual amount of bytes in column B * at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that * page. Essentially, a conservative over-estimate of the real size. */ struct row_total_size { cumulative_row_info const* c_info; size_type const* key_offsets; size_t num_keys; __device__ cumulative_row_info operator()(cumulative_row_info const& i) { // sum sizes for each input column at this row size_t sum = 0; for (int idx = 0; idx < num_keys; idx++) { auto const start = key_offsets[idx]; auto const end = key_offsets[idx + 1]; auto iter = cudf::detail::make_counting_transform_iterator( 0, [&] __device__(size_type i) { return c_info[i].row_count; }); auto const page_index = thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter; sum += c_info[page_index].size_bytes; } return {i.row_count, sum, i.key}; } }; /** * @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read * limit, determine the set of splits. * * @param sizes Vector of cumulative {row_count, byte_size} pairs * @param num_rows Total number of rows to read * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns */ std::vector<gpu::chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes, size_t num_rows, size_t chunk_read_limit) { // now we have an array of {row_count, real output bytes}. just walk through it and generate // splits. // TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch // sizes are reasonably large, this shouldn't iterate too many times std::vector<gpu::chunk_read_info> splits; { size_t cur_pos = 0; size_t cur_cumulative_size = 0; size_t cur_row_count = 0; auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) { return i.size_bytes - cur_cumulative_size; }); auto end = start + sizes.size(); while (cur_row_count < num_rows) { int64_t split_pos = thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start; // if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back // one. if (static_cast<size_t>(split_pos) >= sizes.size() || (sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) { split_pos--; } // best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in // a loop because all of the cumulative sizes for all the pages are sorted into one big list. // so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in // the list twice. so we have to iterate until we skip past all of them. The idea is that we // either do this, or we have to call unique() on the input first. 
while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) && (split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) { split_pos++; } auto const start_row = cur_row_count; cur_row_count = sizes[split_pos].row_count; splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row}); cur_pos = split_pos; cur_cumulative_size = sizes[split_pos].size_bytes; } } // print_cumulative_row_info(sizes, "adjusted", splits); return splits; } /** * @brief Given a set of pages that have had their sizes computed by nesting level and * a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing * a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes. * * @param pages All pages in the file * @param id Additional intermediate information required to process the pages * @param num_rows Total number of rows to read * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns * @param stream CUDA stream to use, default 0 */ std::vector<gpu::chunk_read_info> compute_splits(hostdevice_vector<gpu::PageInfo>& pages, gpu::chunk_intermediate_data const& id, size_t num_rows, size_t chunk_read_limit, rmm::cuda_stream_view stream) { auto const& page_keys = id.page_keys; auto const& page_index = id.page_index; // generate cumulative row counts and sizes rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), stream); // convert PageInfo to cumulative_row_info auto page_input = thrust::make_transform_iterator(page_index.begin(), get_cumulative_row_info{pages.device_ptr()}); thrust::inclusive_scan_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), page_input, c_info.begin(), thrust::equal_to{}, cumulative_row_sum{}); // print_cumulative_page_info(pages, page_index, c_info, stream); // sort by row count rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, stream}; thrust::sort(rmm::exec_policy(stream), c_info_sorted.begin(), c_info_sorted.end(), [] __device__(cumulative_row_info const& a, cumulative_row_info const& b) { return a.row_count < b.row_count; }); // std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size()); // CUDF_CUDA_TRY(hipMemcpy(h_c_info_sorted.data(), // c_info_sorted.data(), // sizeof(cumulative_row_info) * c_info_sorted.size(), // hipMemcpyDefault)); // print_cumulative_row_info(h_c_info_sorted, "raw"); // generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per // key rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, stream); auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), thrust::make_constant_iterator(1), thrust::make_discard_iterator(), key_offsets.begin()) .second; size_t const num_unique_keys = key_offsets_end - key_offsets.begin(); thrust::exclusive_scan( rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin()); // adjust the cumulative info such that for each row count, the size includes any pages that span // that row count. this is so that if we have this case: // page row counts // Column A: 0 <----> 100 <----> 200 // Column B: 0 <---------------> 200 <--------> 400 // | // if we decide to split at row 100, we don't really know the actual amount of bytes in column B // at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that // page. 
// rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), stream); thrust::transform(rmm::exec_policy(stream), c_info_sorted.begin(), c_info_sorted.end(), aggregated_info.begin(), row_total_size{c_info.data(), key_offsets.data(), num_unique_keys}); // bring back to the cpu std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size()); CUDF_CUDA_TRY(hipMemcpyAsync(h_aggregated_info.data(), aggregated_info.data(), sizeof(cumulative_row_info) * c_info.size(), hipMemcpyDefault, stream.value())); stream.synchronize(); return find_splits(h_aggregated_info, num_rows, chunk_read_limit); } struct get_page_chunk_idx { __device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; } }; struct get_page_num_rows { __device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; } }; struct get_page_schema { __device__ size_type operator()(gpu::PageInfo const& page) { return page.src_col_schema; } }; struct input_col_info { int const schema_idx; size_type const nesting_depth; }; /** * @brief Converts a 1-dimensional index into page, depth and column indices used in * allocate_columns to compute columns sizes. * * The input index will iterate through pages, nesting depth and column indices in that order. */ struct reduction_indices { size_t const page_idx; size_type const depth_idx; size_type const col_idx; __device__ reduction_indices(size_t index_, size_type max_depth_, size_t num_pages_) : page_idx(index_ % num_pages_), depth_idx((index_ / num_pages_) % max_depth_), col_idx(index_ / (max_depth_ * num_pages_)) { } }; /** * @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema. */ struct get_page_nesting_size { input_col_info const* const input_cols; size_type const max_depth; size_t const num_pages; gpu::PageInfo const* const pages; int const* page_indices; __device__ size_type operator()(size_t index) const { auto const indices = reduction_indices{index, max_depth, num_pages}; auto const& page = pages[page_indices[indices.page_idx]]; if (page.src_col_schema != input_cols[indices.col_idx].schema_idx || page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return 0; } return page.nesting[indices.depth_idx].batch_size; } }; struct get_reduction_key { size_t const num_pages; __device__ size_t operator()(size_t index) const { return index / num_pages; } }; /** * @brief Writes to the chunk_row field of the PageInfo struct. */ struct chunk_row_output_iter { gpu::PageInfo* p; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; __host__ __device__ chunk_row_output_iter operator+(int i) { return chunk_row_output_iter{p + i}; } __host__ __device__ void operator++() { p++; } __device__ reference operator[](int i) { return p[i].chunk_row; } __device__ reference operator*() { return p->chunk_row; } }; /** * @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema. 
*/ struct start_offset_output_iterator { gpu::PageInfo const* pages; int const* page_indices; size_t cur_index; input_col_info const* input_cols; size_type max_depth; size_t num_pages; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; constexpr void operator=(start_offset_output_iterator const& other) { pages = other.pages; page_indices = other.page_indices; cur_index = other.cur_index; input_cols = other.input_cols; max_depth = other.max_depth; num_pages = other.num_pages; } constexpr start_offset_output_iterator operator+(size_t i) { return start_offset_output_iterator{ pages, page_indices, cur_index + i, input_cols, max_depth, num_pages}; } constexpr void operator++() { cur_index++; } __device__ reference operator[](size_t i) { return dereference(cur_index + i); } __device__ reference operator*() { return dereference(cur_index); } private: __device__ reference dereference(size_t index) { auto const indices = reduction_indices{index, max_depth, num_pages}; gpu::PageInfo const& p = pages[page_indices[indices.page_idx]]; if (p.src_col_schema != input_cols[indices.col_idx].schema_idx || p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return empty; } return p.nesting_decode[indices.depth_idx].page_start_value; } }; struct flat_column_num_rows { gpu::PageInfo const* pages; gpu::ColumnChunkDesc const* chunks; __device__ size_type operator()(size_type pindex) const { gpu::PageInfo const& page = pages[pindex]; // ignore dictionary pages and pages belonging to any column containing repetition (lists) if ((page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) || (chunks[page.chunk_idx].max_level[gpu::level_type::REPETITION] > 0)) { return 0; } return page.num_rows; } }; struct row_counts_nonzero { __device__ bool operator()(size_type count) const { return count > 0; } }; struct row_counts_different { size_type const expected; __device__ bool operator()(size_type count) const { return (count != 0) && (count != expected); } }; /** * @brief Detect malformed parquet input data. * * We have seen cases where parquet files can be oddly malformed. This function specifically * detects one case in particular: * * - When you have a file containing N rows * - For some reason, the sum total of the number of rows over all pages for a given column * is != N * * @param pages All pages to be decoded * @param chunks Chunk data * @param page_keys Keys (schema id) associated with each page, sorted by column * @param page_index Page indices for iteration, sorted by column * @param expected_row_count Expected row count, if applicable * @param stream CUDA stream used for device memory operations and kernel launches */ void detect_malformed_pages(hostdevice_vector<gpu::PageInfo>& pages, hostdevice_vector<gpu::ColumnChunkDesc> const& chunks, device_span<const int> page_keys, device_span<const int> page_index, std::optional<size_t> expected_row_count, rmm::cuda_stream_view stream) { // sum row counts for all non-dictionary, non-list columns. 
other columns will be indicated as 0 rmm::device_uvector<size_type> row_counts(pages.size(), stream); // worst case: num keys == num pages auto const size_iter = thrust::make_transform_iterator( page_index.begin(), flat_column_num_rows{pages.device_ptr(), chunks.device_ptr()}); auto const row_counts_begin = row_counts.begin(); auto const row_counts_end = thrust::reduce_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), size_iter, thrust::make_discard_iterator(), row_counts_begin) .second; // make sure all non-zero row counts are the same rmm::device_uvector<size_type> compacted_row_counts(pages.size(), stream); auto const compacted_row_counts_begin = compacted_row_counts.begin(); auto const compacted_row_counts_end = thrust::copy_if(rmm::exec_policy(stream), row_counts_begin, row_counts_end, compacted_row_counts_begin, row_counts_nonzero{}); if (compacted_row_counts_end != compacted_row_counts_begin) { size_t const found_row_count = static_cast<size_t>(compacted_row_counts.element(0, stream)); // if we somehow don't match the expected row count from the row groups themselves if (expected_row_count.has_value()) { CUDF_EXPECTS(expected_row_count.value() == found_row_count, "Encountered malformed parquet page data (unexpected row count in page data)"); } // all non-zero row counts must be the same auto const chk = thrust::count_if(rmm::exec_policy(stream), compacted_row_counts_begin, compacted_row_counts_end, row_counts_different{static_cast<size_type>(found_row_count)}); CUDF_EXPECTS(chk == 0, "Encountered malformed parquet page data (row count mismatch in page data)"); } } } // anonymous namespace void reader::impl::preprocess_pages(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds, size_t chunk_read_limit) { auto& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; // compute page ordering. // // ordering of pages is by input column schema, repeated across row groups. so // if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like // // 1, 1, 2, 2, 3, 3 // // However, if we had more than one row group, the pattern would be // // 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3 // ^ row group 0 | // ^ row group 1 // // To process pages by key (exclusive_scan_by_key, reduce_by_key, etc), the ordering we actually // want is // // 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 // // We also need to preserve key-relative page ordering, so we need to use a stable sort. rmm::device_uvector<int> page_keys(pages.size(), _stream); rmm::device_uvector<int> page_index(pages.size(), _stream); { thrust::transform(rmm::exec_policy(_stream), pages.device_ptr(), pages.device_ptr() + pages.size(), page_keys.begin(), get_page_schema{}); thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end()); thrust::stable_sort_by_key(rmm::exec_policy(_stream), page_keys.begin(), page_keys.end(), page_index.begin(), thrust::less<int>()); } // detect malformed columns. // - we have seen some cases in the wild where we have a row group containing N // rows, but the total number of rows in the pages for column X is != N. while it // is possible to load this by just capping the number of rows read, we cannot tell // which rows are invalid so we may be returning bad data. in addition, this mismatch // confuses the chunked reader detect_malformed_pages(pages, chunks, page_keys, page_index, uses_custom_row_bounds ? 
std::nullopt : std::make_optional(num_rows), _stream); // iterate over all input columns and determine if they contain lists so we can further // preprocess them. bool has_lists = false; for (size_t idx = 0; idx < _input_columns.size(); idx++) { auto const& input_col = _input_columns[idx]; size_t const max_depth = input_col.nesting_depth(); auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < max_depth; l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the // data computed during gpu::ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; break; } } if (has_lists) { break; } } // generate string dict indices if necessary { auto is_dict_chunk = [](const gpu::ColumnChunkDesc& chunk) { return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_input_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements _chunk_itm_data.str_dict_index = cudf::detail::make_zeroed_device_uvector_async<string_index_pair>( total_str_dict_indexes, _stream, rmm::mr::get_current_device_resource()); // Update chunks with pointers to string dict indices for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { input_column_info const& input_col = _input_columns[chunks[c].src_col_index]; CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema, "Column/page schema index mismatch"); if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs; str_ofs += pages[page_count].num_input_values; } // column_data_base will always point to leaf data, even for nested types. page_count += chunks[c].max_num_pages; } if (total_str_dict_indexes > 0) { chunks.host_to_device(_stream); gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream); } } // intermediate data we will need for further chunked reads if (has_lists || chunk_read_limit > 0) { // computes: // PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into // account), not just the number of values. PageNestingInfo::size for each level of nesting, for // each page. // // we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen // if: // - user has passed custom row bounds // - we will be doing a chunked read gpu::ComputePageSizes(pages, chunks, 0, // 0-max size_t. process all possible rows std::numeric_limits<size_t>::max(), true, // compute num_rows chunk_read_limit > 0, // compute string sizes _file_itm_data.level_type_size, _stream); // computes: // PageInfo::chunk_row (the absolute start row index) for all pages // Note: this is doing some redundant work for pages in flat hierarchies. chunk_row has already // been computed during header decoding. the overall amount of work here is very small though. 
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{}); auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{}); thrust::exclusive_scan_by_key(rmm::exec_policy(_stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // preserve page ordering data _chunk_itm_data.page_keys = std::move(page_keys); _chunk_itm_data.page_index = std::move(page_index); // retrieve pages back pages.device_to_host(_stream, true); // print_pages(pages, _stream); } // compute splits if necessary. otherwise return a single split representing // the whole file. _chunk_read_info = chunk_read_limit > 0 ? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream) : std::vector<gpu::chunk_read_info>{{skip_rows, num_rows}}; } void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds) { auto const& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; // Should not reach here if there is no page data. CUDF_EXPECTS(pages.size() > 0, "There is no page to parse"); // computes: // PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into // account. PageInfo::skipped_values, which tells us where to start decoding in the input to // respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds // is set (if the user has specified artificial bounds). if (uses_custom_row_bounds) { gpu::ComputePageSizes(pages, chunks, skip_rows, num_rows, false, // num_rows is already computed false, // no need to compute string sizes _file_itm_data.level_type_size, _stream); // print_pages(pages, _stream); } // iterate over all input columns and allocate any associated output // buffers if they are not part of a list hierarchy. mark down // if we have any list columns that need further processing. bool has_lists = false; for (size_t idx = 0; idx < _input_columns.size(); idx++) { auto const& input_col = _input_columns[idx]; size_t const max_depth = input_col.nesting_depth(); auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < max_depth; l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the // data computed during gpu::ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; } // if we haven't already processed this column because it is part of a struct hierarchy else if (out_buf.size == 0) { // add 1 for the offset if this is a list column out_buf.create( out_buf.type.id() == type_id::LIST && l_idx < max_depth ? 
num_rows + 1 : num_rows, _stream, _mr); } } } // compute output column sizes by examining the pages of the -input- columns if (has_lists) { auto& page_index = _chunk_itm_data.page_index; std::vector<input_col_info> h_cols_info; h_cols_info.reserve(_input_columns.size()); std::transform(_input_columns.cbegin(), _input_columns.cend(), std::back_inserter(h_cols_info), [](auto& col) -> input_col_info { return {col.schema_idx, static_cast<size_type>(col.nesting_depth())}; }); auto const max_depth = (*std::max_element(h_cols_info.cbegin(), h_cols_info.cend(), [](auto& l, auto& r) { return l.nesting_depth < r.nesting_depth; })) .nesting_depth; auto const d_cols_info = cudf::detail::make_device_uvector_async( h_cols_info, _stream, rmm::mr::get_current_device_resource()); auto const num_keys = _input_columns.size() * max_depth * pages.size(); // size iterator. indexes pages by sorted order rmm::device_uvector<size_type> size_input{num_keys, _stream}; thrust::transform( rmm::exec_policy(_stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(num_keys), size_input.begin(), get_page_nesting_size{ d_cols_info.data(), max_depth, pages.size(), pages.device_ptr(), page_index.begin()}); auto const reduction_keys = cudf::detail::make_counting_transform_iterator(0, get_reduction_key{pages.size()}); hostdevice_vector<size_t> sizes{_input_columns.size() * max_depth, _stream}; // find the size of each column thrust::reduce_by_key(rmm::exec_policy(_stream), reduction_keys, reduction_keys + num_keys, size_input.cbegin(), thrust::make_discard_iterator(), sizes.d_begin()); // for nested hierarchies, compute per-page start offset thrust::exclusive_scan_by_key( rmm::exec_policy(_stream), reduction_keys, reduction_keys + num_keys, size_input.cbegin(), start_offset_output_iterator{ pages.device_ptr(), page_index.begin(), 0, d_cols_info.data(), max_depth, pages.size()}); sizes.device_to_host(_stream, true); for (size_type idx = 0; idx < static_cast<size_type>(_input_columns.size()); idx++) { auto const& input_col = _input_columns[idx]; auto* cols = &_output_buffers; for (size_type l_idx = 0; l_idx < static_cast<size_type>(input_col.nesting_depth()); l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this buffer is part of a list hierarchy, we need to determine it's // final size and allocate it here. // // for struct columns, higher levels of the output columns are shared between input // columns. so don't compute any given level more than once. if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) { auto size = sizes[(idx * max_depth) + l_idx]; // if this is a list column add 1 for non-leaf levels for the terminating offset if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; } // allocate out_buf.create(size, _stream, _mr); } } } } } } // namespace cudf::io::detail::parquet
7368bc8cb9057170f60b3c9cdde844ecbd6c5cd7.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "reader_impl.hpp" #include <io/comp/nvcomp_adapter.hpp> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/iterator_categories.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #include <numeric> namespace cudf::io::detail::parquet { namespace { /** * @brief Generate depth remappings for repetition and definition levels. * * When dealing with columns that contain lists, we must examine incoming * repetition and definition level pairs to determine what range of output nesting * is indicated when adding new values. This function generates the mappings of * the R/D levels to those start/end bounds * * @param remap Maps column schema index to the R/D remapping vectors for that column * @param src_col_schema The column schema to generate the new mapping for * @param md File metadata information */ void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap, int src_col_schema, aggregate_reader_metadata const& md) { // already generated for this level if (remap.find(src_col_schema) != remap.end()) { return; } auto schema = md.get_schema(src_col_schema); int max_depth = md.get_output_nesting_depth(src_col_schema); CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(), "Attempting to remap a schema more than once"); auto inserted = remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}}); auto& depth_remap = inserted.first->second; std::vector<int>& rep_depth_remap = (depth_remap.first); rep_depth_remap.resize(schema.max_repetition_level + 1); std::vector<int>& def_depth_remap = (depth_remap.second); def_depth_remap.resize(schema.max_definition_level + 1); // the key: // for incoming level values R/D // add values starting at the shallowest nesting level X has repetition level R // until you reach the deepest nesting level Y that corresponds to the repetition level R1 // held by the nesting level that has definition level D // // Example: a 3 level struct with a list at the bottom // // R / D Depth // level0 0 / 1 0 // level1 0 / 2 1 // level2 0 / 3 2 // list 0 / 3 3 // element 1 / 4 4 // // incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0) // incoming R/D : 0, 1 -> add values from depth 0 to 3 // incoming R/D : 0, 2 -> add values from depth 0 to 3 // incoming R/D : 1, 4 -> add values from depth 4 
to 4 // // Note : the -validity- of values is simply checked by comparing the incoming D value against the // D value of the given nesting level (incoming D >= the D for the nesting level == valid, // otherwise NULL). The tricky part is determining what nesting levels to add values at. // // For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting // depth. // // compute "X" from above for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) { auto find_shallowest = [&](int r) { int shallowest = -1; int cur_depth = max_depth - 1; int schema_idx = src_col_schema; while (schema_idx > 0) { auto cur_schema = md.get_schema(schema_idx); if (cur_schema.max_repetition_level == r) { // if this is a repeated field, map it one level deeper shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth; } // if it's one-level encoding list else if (cur_schema.is_one_level_list(md.get_schema(cur_schema.parent_idx))) { shallowest = cur_depth - 1; } if (!cur_schema.is_stub()) { cur_depth--; } schema_idx = cur_schema.parent_idx; } return shallowest; }; rep_depth_remap[s_idx] = find_shallowest(s_idx); } // compute "Y" from above for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) { auto find_deepest = [&](int d) { SchemaElement prev_schema; int schema_idx = src_col_schema; int r1 = 0; while (schema_idx > 0) { SchemaElement cur_schema = md.get_schema(schema_idx); if (cur_schema.max_definition_level == d) { // if this is a repeated field, map it one level deeper r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level : cur_schema.max_repetition_level; break; } prev_schema = cur_schema; schema_idx = cur_schema.parent_idx; } // we now know R1 from above. return the deepest nesting level that has the // same repetition level schema_idx = src_col_schema; int depth = max_depth - 1; while (schema_idx > 0) { SchemaElement cur_schema = md.get_schema(schema_idx); if (cur_schema.max_repetition_level == r1) { // if this is a repeated field, map it one level deeper depth = cur_schema.is_stub() ? depth + 1 : depth; break; } if (!cur_schema.is_stub()) { depth--; } prev_schema = cur_schema; schema_idx = cur_schema.parent_idx; } return depth; }; def_depth_remap[s_idx] = find_deepest(s_idx); } } /** * @brief Return the required number of bits to store a value. */ template <typename T = uint8_t> [[nodiscard]] T required_bits(uint32_t max_level) { return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level)); } /** * @brief Converts cuDF units to Parquet units. * * @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type. */ [[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id, type_id timestamp_type_id, parquet::Type physical, int8_t converted, int32_t length) { int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? 
length : 0; int32_t clock_rate = 0; if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) { type_width = 1; // I32 -> I8 } else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) { type_width = 2; // I32 -> I16 } else if (column_type_id == type_id::INT32) { type_width = 4; // str -> hash32 } else if (is_chrono(data_type{column_type_id})) { clock_rate = to_clockrate(timestamp_type_id); } int8_t converted_type = converted; if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 && not cudf::is_fixed_point(data_type{column_type_id})) { converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal } return std::make_tuple(type_width, clock_rate, converted_type); } /** * @brief Reads compressed page data to device memory. * * @param sources Dataset sources * @param page_data Buffers to hold compressed page data for each chunk * @param chunks List of column chunk descriptors * @param begin_chunk Index of first column chunk to read * @param end_chunk Index after the last column chunk to read * @param column_chunk_offsets File offset for all chunks * @param chunk_source_map Association between each column chunk and its source * @param stream CUDA stream used for device memory operations and kernel launches * * @return A future object for reading synchronization */ [[nodiscard]] std::future<void> read_column_chunks_async( std::vector<std::unique_ptr<datasource>> const& sources, std::vector<std::unique_ptr<datasource::buffer>>& page_data, hostdevice_vector<gpu::ColumnChunkDesc>& chunks, size_t begin_chunk, size_t end_chunk, const std::vector<size_t>& column_chunk_offsets, std::vector<size_type> const& chunk_source_map, rmm::cuda_stream_view stream) { // Transfer chunk data, coalescing adjacent chunks std::vector<std::future<size_t>> read_tasks; for (size_t chunk = begin_chunk; chunk < end_chunk;) { const size_t io_offset = column_chunk_offsets[chunk]; size_t io_size = chunks[chunk].compressed_size; size_t next_chunk = chunk + 1; const bool is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED); while (next_chunk < end_chunk) { const size_t next_offset = column_chunk_offsets[next_chunk]; const bool is_next_compressed = (chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED); if (next_offset != io_offset + io_size || is_next_compressed != is_compressed) { // Can't merge if not contiguous or mixing compressed and uncompressed // Not coalescing uncompressed with compressed chunks is so that compressed buffers can be // freed earlier (immediately after decompression stage) to limit peak memory requirements break; } io_size += chunks[next_chunk].compressed_size; next_chunk++; } if (io_size != 0) { auto& source = sources[chunk_source_map[chunk]]; if (source->is_device_read_preferred(io_size)) { auto buffer = rmm::device_buffer(io_size, stream); auto fut_read_size = source->device_read_async( io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream); read_tasks.emplace_back(std::move(fut_read_size)); page_data[chunk] = datasource::buffer::create(std::move(buffer)); } else { auto const buffer = source->host_read(io_offset, io_size); page_data[chunk] = datasource::buffer::create(rmm::device_buffer(buffer->data(), buffer->size(), stream)); } auto d_compdata = page_data[chunk]->data(); do { chunks[chunk].compressed_data = d_compdata; d_compdata += chunks[chunk].compressed_size; } while (++chunk != next_chunk); } else { chunk = next_chunk; } } auto sync_fn = [](decltype(read_tasks) read_tasks) { for 
(auto& task : read_tasks) { task.wait(); } }; return std::async(std::launch::deferred, sync_fn, std::move(read_tasks)); } /** * @brief Return the number of total pages from the given column chunks. * * @param chunks List of column chunk descriptors * @param stream CUDA stream used for device memory operations and kernel launches * * @return The total number of pages */ [[nodiscard]] size_t count_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks, rmm::cuda_stream_view stream) { size_t total_pages = 0; chunks.host_to_device(stream); gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); chunks.device_to_host(stream, true); for (size_t c = 0; c < chunks.size(); c++) { total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages; } return total_pages; } // see setupLocalPageInfo() in page_data.cu for supported page encodings constexpr bool is_supported_encoding(Encoding enc) { switch (enc) { case Encoding::PLAIN: case Encoding::PLAIN_DICTIONARY: case Encoding::RLE: case Encoding::RLE_DICTIONARY: return true; default: return false; } } /** * @brief Decode the page information from the given column chunks. * * @param chunks List of column chunk descriptors * @param pages List of page information * @param stream CUDA stream used for device memory operations and kernel launches * @returns The size in bytes of level type data required */ int decode_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks, hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view stream) { // IMPORTANT : if you change how pages are stored within a chunk (dist pages, then data pages), // please update preprocess_nested_columns to reflect this. for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } chunks.host_to_device(stream); gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream); // compute max bytes needed for level data auto level_bit_size = cudf::detail::make_counting_transform_iterator(0, [chunks = chunks.begin()] __device__(int i) { auto c = chunks[i]; return static_cast<int>( max(c.level_bits[gpu::level_type::REPETITION], c.level_bits[gpu::level_type::DEFINITION])); }); // max level data bit size. int const max_level_bits = thrust::reduce(rmm::exec_policy(stream), level_bit_size, level_bit_size + chunks.size(), 0, thrust::maximum<int>()); auto const level_type_size = std::max(1, cudf::util::div_rounding_up_safe(max_level_bits, 8)); pages.device_to_host(stream, true); // validate page encodings CUDF_EXPECTS(std::all_of(pages.begin(), pages.end(), [](auto const& page) { return is_supported_encoding(page.encoding); }), "Unsupported page encoding detected"); return level_type_size; } /** * @brief Decompresses the page data, at page granularity. 
* * @param chunks List of column chunk descriptors * @param pages List of page information * @param stream CUDA stream used for device memory operations and kernel launches * * @return Device buffer to decompressed page data */ [[nodiscard]] rmm::device_buffer decompress_page_data( hostdevice_vector<gpu::ColumnChunkDesc>& chunks, hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view stream) { auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)>& f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_buffer debrotli_scratch; // Count the exact number of compressed pages size_t num_comp_pages = 0; size_t total_decomp_size = 0; struct codec_stats { parquet::Compression compression_type = UNCOMPRESSED; size_t num_pages = 0; int32_t max_decompressed_size = 0; size_t total_decomp_size = 0; }; std::array codecs{codec_stats{parquet::GZIP}, codec_stats{parquet::SNAPPY}, codec_stats{parquet::BROTLI}, codec_stats{parquet::ZSTD}}; auto is_codec_supported = [&codecs](int8_t codec) { if (codec == parquet::UNCOMPRESSED) return true; return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) { return codec == cstats.compression_type; }) != codecs.end(); }; CUDF_EXPECTS(std::all_of(chunks.begin(), chunks.end(), [&is_codec_supported](auto const& chunk) { return is_codec_supported(chunk.codec); }), "Unsupported compression type"); for (auto& codec : codecs) { for_each_codec_page(codec.compression_type, [&](size_t page) { auto page_uncomp_size = pages[page].uncompressed_page_size; total_decomp_size += page_uncomp_size; codec.total_decomp_size += page_uncomp_size; codec.max_decompressed_size = std::max(codec.max_decompressed_size, page_uncomp_size); codec.num_pages++; num_comp_pages++; }); if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) { debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream); } } // Dispatch batches of pages to decompress for each codec rmm::device_buffer decomp_pages(total_decomp_size, stream); std::vector<device_span<uint8_t const>> comp_in; comp_in.reserve(num_comp_pages); std::vector<device_span<uint8_t>> comp_out; comp_out.reserve(num_comp_pages); // vectors to save v2 def and rep level data, if any std::vector<device_span<uint8_t const>> copy_in; copy_in.reserve(num_comp_pages); std::vector<device_span<uint8_t>> copy_out; copy_out.reserve(num_comp_pages); rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream); thrust::fill(rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), compression_result{0, compression_status::FAILURE}); size_t decomp_offset = 0; int32_t start_pos = 0; for (const auto& codec : codecs) { if (codec.num_pages == 0) { continue; } for_each_codec_page(codec.compression_type, [&](size_t page_idx) { auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset; auto& page = pages[page_idx]; // offset will only be non-zero for V2 pages auto const offset = page.def_lvl_bytes + page.rep_lvl_bytes; // for V2 need to copy def and rep level info into place, and then offset the // input and output buffers. otherwise we'd have to keep both the compressed // and decompressed data. 
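// Rough layout sketch (sizes are hypothetical) of how a V2 data page is split up below: the
// level bytes sit uncompressed at the front of the page, so only the tail is handed to the
// decompressor while the first `offset` bytes are copied over verbatim.
//
//   page_data: [ uncompressed rep/def level bytes | compressed values ... ]
//               <------------ offset ------------>
//
//   copy_in/copy_out -> the first `offset` bytes, copied as-is into the output buffer
//   comp_in/comp_out -> the remaining bytes, decompressed by the selected codec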
if (offset != 0) { copy_in.emplace_back(page.page_data, offset); copy_out.emplace_back(dst_base, offset); } comp_in.emplace_back(page.page_data + offset, static_cast<size_t>(page.compressed_page_size - offset)); comp_out.emplace_back(dst_base + offset, static_cast<size_t>(page.uncompressed_page_size - offset)); page.page_data = dst_base; decomp_offset += page.uncompressed_page_size; }); host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos, codec.num_pages}; auto const d_comp_in = cudf::detail::make_device_uvector_async( comp_in_view, stream, rmm::mr::get_current_device_resource()); host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos, codec.num_pages); auto const d_comp_out = cudf::detail::make_device_uvector_async( comp_out_view, stream, rmm::mr::get_current_device_resource()); device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages); switch (codec.compression_type) { case parquet::GZIP: gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream); break; case parquet::SNAPPY: if (nvcomp_integration::is_stable_enabled()) { nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY, d_comp_in, d_comp_out, d_comp_res_view, codec.max_decompressed_size, codec.total_decomp_size, stream); } else { gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream); } break; case parquet::ZSTD: nvcomp::batched_decompress(nvcomp::compression_type::ZSTD, d_comp_in, d_comp_out, d_comp_res_view, codec.max_decompressed_size, codec.total_decomp_size, stream); break; case parquet::BROTLI: gpu_debrotli(d_comp_in, d_comp_out, d_comp_res_view, debrotli_scratch.data(), debrotli_scratch.size(), stream); break; default: CUDF_FAIL("Unexpected decompression dispatch"); break; } start_pos += codec.num_pages; } CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream), comp_res.begin(), comp_res.end(), [] __device__(auto const& res) { return res.status == compression_status::SUCCESS; }), "Error during decompression"); // now copy the uncompressed V2 def and rep level data if (not copy_in.empty()) { auto const d_copy_in = cudf::detail::make_device_uvector_async( copy_in, stream, rmm::mr::get_current_device_resource()); auto const d_copy_out = cudf::detail::make_device_uvector_async( copy_out, stream, rmm::mr::get_current_device_resource()); gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream); stream.synchronize(); } // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer pages.host_to_device(stream); return decomp_pages; } } // namespace void reader::impl::allocate_nesting_info() { auto const& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; auto& page_nesting_info = _file_itm_data.page_nesting_info; auto& page_nesting_decode_info = _file_itm_data.page_nesting_decode_info; // compute total # of page_nesting infos needed and allocate space. 
doing this in one // buffer to keep it to a single gpu allocation size_t const total_page_nesting_infos = std::accumulate( chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) { // the schema of the input column auto const& schema = _metadata->get_schema(chunk.src_col_schema); auto const per_page_nesting_info_size = max( schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema)); return total + (per_page_nesting_info_size * chunk.num_data_pages); }); page_nesting_info = hostdevice_vector<gpu::PageNestingInfo>{total_page_nesting_infos, _stream}; page_nesting_decode_info = hostdevice_vector<gpu::PageNestingDecodeInfo>{total_page_nesting_infos, _stream}; // update pointers in the PageInfos int target_page_index = 0; int src_info_index = 0; for (size_t idx = 0; idx < chunks.size(); idx++) { int src_col_schema = chunks[idx].src_col_schema; auto& schema = _metadata->get_schema(src_col_schema); auto const per_page_nesting_info_size = std::max( schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema)); // skip my dict pages target_page_index += chunks[idx].num_dict_pages; for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index; pages[target_page_index + p_idx].nesting_decode = page_nesting_decode_info.device_ptr() + src_info_index; pages[target_page_index + p_idx].nesting_info_size = per_page_nesting_info_size; pages[target_page_index + p_idx].num_output_nesting_levels = _metadata->get_output_nesting_depth(src_col_schema); src_info_index += per_page_nesting_info_size; } target_page_index += chunks[idx].num_data_pages; } // fill in int nesting_info_index = 0; std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping; for (size_t idx = 0; idx < chunks.size(); idx++) { int src_col_schema = chunks[idx].src_col_schema; // schema of the input column auto& schema = _metadata->get_schema(src_col_schema); // real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc) int max_depth = _metadata->get_output_nesting_depth(src_col_schema); // # of nesting infos stored per page for this column auto const per_page_nesting_info_size = std::max(schema.max_definition_level + 1, max_depth); // if this column has lists, generate depth remapping std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping; if (schema.max_repetition_level > 0) { generate_depth_remappings(depth_remapping, src_col_schema, *_metadata); } // fill in host-side nesting info int schema_idx = src_col_schema; auto cur_schema = _metadata->get_schema(schema_idx); int cur_depth = max_depth - 1; while (schema_idx > 0) { // stub columns (basically the inner field of a list scheme element) are not real columns. 
// we can ignore them for the purposes of output nesting info if (!cur_schema.is_stub()) { // initialize each page within the chunk for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) { gpu::PageNestingInfo* pni = &page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; gpu::PageNestingDecodeInfo* nesting_info = &page_nesting_decode_info[nesting_info_index + (p_idx * per_page_nesting_info_size)]; // if we have lists, set our start and end depth remappings if (schema.max_repetition_level > 0) { auto remap = depth_remapping.find(src_col_schema); CUDF_EXPECTS(remap != depth_remapping.end(), "Could not find depth remapping for schema"); std::vector<int> const& rep_depth_remap = (remap->second.first); std::vector<int> const& def_depth_remap = (remap->second.second); for (size_t m = 0; m < rep_depth_remap.size(); m++) { nesting_info[m].start_depth = rep_depth_remap[m]; } for (size_t m = 0; m < def_depth_remap.size(); m++) { nesting_info[m].end_depth = def_depth_remap[m]; } } // values indexed by output column index nesting_info[cur_depth].max_def_level = cur_schema.max_definition_level; pni[cur_depth].size = 0; pni[cur_depth].type = to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id()); pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL; } // move up the hierarchy cur_depth--; } // next schema schema_idx = cur_schema.parent_idx; cur_schema = _metadata->get_schema(schema_idx); } nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages); } // copy nesting info to the device page_nesting_info.host_to_device(_stream); page_nesting_decode_info.host_to_device(_stream); } void reader::impl::allocate_level_decode_space() { auto& pages = _file_itm_data.pages_info; // TODO: this could be made smaller if we ignored dictionary pages and pages with no // repetition data. 
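// Sketch of the carve-up performed below (layout inferred from the loop that follows): one
// flat device buffer provides two fixed-size scratch regions per page, one for definition
// levels and one for repetition levels, each LEVEL_DECODE_BUF_SIZE * level_type_size bytes:
//
//   level_decode_data: [ page0 DEF | page0 REP | page1 DEF | page1 REP | ... ]
//
// giving decode_buf_size == pages.size() * 2 * LEVEL_DECODE_BUF_SIZE * level_type_size.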
size_t const per_page_decode_buf_size = LEVEL_DECODE_BUF_SIZE * 2 * _file_itm_data.level_type_size; auto const decode_buf_size = per_page_decode_buf_size * pages.size(); _file_itm_data.level_decode_data = rmm::device_buffer(decode_buf_size, _stream, rmm::mr::get_current_device_resource()); // distribute the buffers uint8_t* buf = static_cast<uint8_t*>(_file_itm_data.level_decode_data.data()); for (size_t idx = 0; idx < pages.size(); idx++) { auto& p = pages[idx]; p.lvl_decode_buf[gpu::level_type::DEFINITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size); p.lvl_decode_buf[gpu::level_type::REPETITION] = buf; buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size); } } std::pair<bool, std::vector<std::future<void>>> reader::impl::create_and_read_column_chunks( cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows) { auto& raw_page_data = _file_itm_data.raw_page_data; auto& chunks = _file_itm_data.chunks; // Descriptors for all the chunks that make up the selected columns const auto num_input_columns = _input_columns.size(); const auto num_chunks = row_groups_info.size() * num_input_columns; chunks = hostdevice_vector<gpu::ColumnChunkDesc>(0, num_chunks, _stream); // Association between each column chunk and its source std::vector<size_type> chunk_source_map(num_chunks); // Tracker for eventually deallocating compressed and uncompressed data raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks); // Keep track of column chunk file offsets std::vector<size_t> column_chunk_offsets(num_chunks); // Initialize column chunk information size_t total_decompressed_size = 0; auto remaining_rows = num_rows; std::vector<std::future<void>> read_rowgroup_tasks; for (const auto& rg : row_groups_info) { const auto& row_group = _metadata->get_row_group(rg.index, rg.source_index); auto const row_group_start = rg.start_row; auto const row_group_source = rg.source_index; auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows); // generate ColumnChunkDesc objects for everything to be decoded (all input columns) for (size_t i = 0; i < num_input_columns; ++i) { auto col = _input_columns[i]; // look up metadata auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx); auto& schema = _metadata->get_schema(col.schema_idx); auto [type_width, clock_rate, converted_type] = conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()), _timestamp_type.id(), schema.type, schema.converted_type, schema.type_length); column_chunk_offsets[chunks.size()] = (col_meta.dictionary_page_offset != 0) ? 
std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size, nullptr, col_meta.num_values, schema.type, type_width, row_group_start, row_group_rows, schema.max_definition_level, schema.max_repetition_level, _metadata->get_output_nesting_depth(col.schema_idx), required_bits(schema.max_definition_level), required_bits(schema.max_repetition_level), col_meta.codec, converted_type, schema.logical_type, schema.decimal_precision, clock_rate, i, col.schema_idx)); // Map each column chunk to its column index and its source index chunk_source_map[chunks.size() - 1] = row_group_source; if (col_meta.codec != Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } } remaining_rows -= row_group_rows; } // Read compressed chunk data to device memory read_rowgroup_tasks.push_back(read_column_chunks_async(_sources, raw_page_data, chunks, 0, chunks.size(), column_chunk_offsets, chunk_source_map, _stream)); CUDF_EXPECTS(remaining_rows == 0, "All rows data must be read."); return {total_decompressed_size > 0, std::move(read_rowgroup_tasks)}; } void reader::impl::load_and_decompress_data( cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows) { // This function should never be called if `num_rows == 0`. CUDF_EXPECTS(num_rows > 0, "Number of reading rows must not be zero."); auto& raw_page_data = _file_itm_data.raw_page_data; auto& decomp_page_data = _file_itm_data.decomp_page_data; auto& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; auto const [has_compressed_data, read_rowgroup_tasks] = create_and_read_column_chunks(row_groups_info, num_rows); for (auto& task : read_rowgroup_tasks) { task.wait(); } // Process dataset chunk pages into output columns auto const total_pages = count_page_headers(chunks, _stream); pages = hostdevice_vector<gpu::PageInfo>(total_pages, total_pages, _stream); if (total_pages > 0) { // decoding of column/page information _file_itm_data.level_type_size = decode_page_headers(chunks, pages, _stream); if (has_compressed_data) { decomp_page_data = decompress_page_data(chunks, pages, _stream); // Free compressed data for (size_t c = 0; c < chunks.size(); c++) { if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); } } } // build output column info // walk the schema, building out_buffers that mirror what our final cudf columns will look // like. important : there is not necessarily a 1:1 mapping between input columns and output // columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct // columns. The "structiness" is simply implied by the schema. For example, this schema: // required group field_id=1 name { // required binary field_id=2 firstname (String); // required binary field_id=3 middlename (String); // required binary field_id=4 lastname (String); // } // will only contain 3 columns of data (firstname, middlename, lastname). But of course // "name" is a struct column that we want to return, so we have to make sure that we // create it ourselves. 
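// Concretely (illustration only): for the schema above, the reader returns a single struct
// output column with three string children, even though only the three leaf columns have
// ColumnChunkDesc entries:
//
//   name : struct<firstname: string, middlename: string, lastname: string>
//
// The struct level is synthesized from the schema hierarchy rather than from page data.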
// std::vector<output_column_info> output_info = build_output_column_info(); // the following two allocate functions modify the page data pages.device_to_host(_stream, true); { // nesting information (sizes, etc) stored -per page- // note : even for flat schemas, we allocate 1 level of "nesting" info allocate_nesting_info(); // level decode space allocate_level_decode_space(); } pages.host_to_device(_stream); } } namespace { struct cumulative_row_info { size_t row_count; // cumulative row count size_t size_bytes; // cumulative size in bytes int key; // schema index }; #if defined(PREPROCESS_DEBUG) void print_pages(hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view _stream) { pages.device_to_host(_stream, true); for (size_t idx = 0; idx < pages.size(); idx++) { auto const& p = pages[idx]; // skip dictionary pages if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; } printf( "P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d), " "str_bytes(%d)\n", idx, p.src_col_schema, p.chunk_row, p.num_rows, p.skipped_values, p.skipped_leaf_values, p.str_bytes); } } void print_cumulative_page_info(hostdevice_vector<gpu::PageInfo>& pages, rmm::device_uvector<int32_t> const& page_index, rmm::device_uvector<cumulative_row_info> const& c_info, rmm::cuda_stream_view stream) { pages.device_to_host(stream, true); printf("------------\nCumulative sizes by page\n"); std::vector<int> schemas(pages.size()); std::vector<int> h_page_index(pages.size()); CUDF_CUDA_TRY(cudaMemcpy( h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), cudaMemcpyDefault)); std::vector<cumulative_row_info> h_cinfo(pages.size()); CUDF_CUDA_TRY(cudaMemcpy( h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), cudaMemcpyDefault)); auto schema_iter = cudf::detail::make_counting_transform_iterator( 0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; }); thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin()); auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end()); schemas.resize(last - schemas.begin()); printf("Num schemas: %lu\n", schemas.size()); for (size_t idx = 0; idx < schemas.size(); idx++) { printf("Schema %d\n", schemas[idx]); for (size_t pidx = 0; pidx < pages.size(); pidx++) { auto const& page = pages[h_page_index[pidx]]; if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) { continue; } printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes); } } } void print_cumulative_row_info( host_span<cumulative_row_info const> sizes, std::string const& label, std::optional<std::vector<gpu::chunk_read_info>> splits = std::nullopt) { if (splits.has_value()) { printf("------------\nSplits\n"); for (size_t idx = 0; idx < splits->size(); idx++) { printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows); } } printf("------------\nCumulative sizes %s\n", label.c_str()); for (size_t idx = 0; idx < sizes.size(); idx++) { printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key); if (splits.has_value()) { // if we have a split at this row count and this is the last instance of this row count auto start = thrust::make_transform_iterator( splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; }); auto end = start + splits->size(); auto split = std::find(start, end, sizes[idx].row_count); auto const split_index = [&]() -> int { if (split != end && ((idx == sizes.size() - 1) || 
(sizes[idx + 1].row_count > sizes[idx].row_count))) { return static_cast<int>(std::distance(start, split)); } return idx == 0 ? 0 : -1; }(); if (split_index >= 0) { printf(" <-- split {%lu, %lu}", splits.value()[split_index].skip_rows, splits.value()[split_index].num_rows); } } printf("\n"); } } #endif // PREPROCESS_DEBUG /** * @brief Functor which reduces two cumulative_row_info structs of the same key. */ struct cumulative_row_sum { cumulative_row_info operator() __device__(cumulative_row_info const& a, cumulative_row_info const& b) const { return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key}; } }; /** * @brief Functor which computes the total data size for a given type of cudf column. * * In the case of strings, the return size does not include the chars themselves. That * information is tracked separately (see PageInfo::str_bytes). */ struct row_size_functor { __device__ size_t validity_size(size_t num_rows, bool nullable) { return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0; } template <typename T> __device__ size_t operator()(size_t num_rows, bool nullable) { auto const element_size = sizeof(device_storage_type_t<T>); return (element_size * num_rows) + validity_size(num_rows, nullable); } }; template <> __device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable) { auto const offset_size = sizeof(offset_type); // NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset // for the entire column, whereas this is adding an extra offset per page. So we will get a // small over-estimate of the real size of the order : # of pages * 4 bytes. It seems better // to overestimate size somewhat than to underestimate it and potentially generate chunks // that are too large. return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); } template <> __device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable) { return validity_size(num_rows, nullable); } template <> __device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable) { // only returns the size of offsets and validity. the size of the actual string chars // is tracked separately. auto const offset_size = sizeof(offset_type); // see note about offsets in the list_view template. return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable); } /** * @brief Functor which computes the total output cudf data size for all of * the data in this page. * * Sums across all nesting levels. */ struct get_cumulative_row_info { gpu::PageInfo const* const pages; __device__ cumulative_row_info operator()(size_type index) { auto const& page = pages[index]; if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { return cumulative_row_info{0, 0, page.src_col_schema}; } // total nested size, not counting string data auto iter = cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) { auto const& pni = page.nesting[i]; return cudf::type_dispatcher( data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable); }); size_t const row_count = static_cast<size_t>(page.nesting[0].size); return { row_count, thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes, page.src_col_schema}; } }; /** * @brief Functor which computes the effective size of all input columns by page. * * For a given row, we want to find the cost of all pages for all columns involved * in loading up to that row. 
The complication here is that not all pages are the * same size between columns. Example: * * page row counts * Column A: 0 <----> 100 <----> 200 * Column B: 0 <---------------> 200 <--------> 400 | * if we decide to split at row 100, we don't really know the actual amount of bytes in column B * at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that * page. Essentially, a conservative over-estimate of the real size. */ struct row_total_size { cumulative_row_info const* c_info; size_type const* key_offsets; size_t num_keys; __device__ cumulative_row_info operator()(cumulative_row_info const& i) { // sum sizes for each input column at this row size_t sum = 0; for (int idx = 0; idx < num_keys; idx++) { auto const start = key_offsets[idx]; auto const end = key_offsets[idx + 1]; auto iter = cudf::detail::make_counting_transform_iterator( 0, [&] __device__(size_type i) { return c_info[i].row_count; }); auto const page_index = thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter; sum += c_info[page_index].size_bytes; } return {i.row_count, sum, i.key}; } }; /** * @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read * limit, determine the set of splits. * * @param sizes Vector of cumulative {row_count, byte_size} pairs * @param num_rows Total number of rows to read * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns */ std::vector<gpu::chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes, size_t num_rows, size_t chunk_read_limit) { // now we have an array of {row_count, real output bytes}. just walk through it and generate // splits. // TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch // sizes are reasonably large, this shouldn't iterate too many times std::vector<gpu::chunk_read_info> splits; { size_t cur_pos = 0; size_t cur_cumulative_size = 0; size_t cur_row_count = 0; auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) { return i.size_bytes - cur_cumulative_size; }); auto end = start + sizes.size(); while (cur_row_count < num_rows) { int64_t split_pos = thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start; // if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back // one. if (static_cast<size_t>(split_pos) >= sizes.size() || (sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) { split_pos--; } // best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in // a loop because all of the cumulative sizes for all the pages are sorted into one big list. // so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in // the list twice. so we have to iterate until we skip past all of them. The idea is that we // either do this, or we have to call unique() on the input first. 
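// Worked example (hypothetical sizes): with cumulative entries {row_count, size_bytes} of
// {100, 4MB}, {200, 9MB}, {300, 15MB} and chunk_read_limit == 10MB:
//   pass 1: lower_bound on (size - 0)   lands on {300, 15MB}, steps back to {200, 9MB}
//           -> split {skip_rows = 0,   num_rows = 200}, cur_cumulative_size = 9MB
//   pass 2: lower_bound on (size - 9MB) runs off the end,  steps back to {300, 15MB}
//           -> split {skip_rows = 200, num_rows = 100}
// The while loop below merely advances past entries that repeat the current row count,
// since all columns' cumulative sizes live together in one sorted list.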
while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) && (split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) { split_pos++; } auto const start_row = cur_row_count; cur_row_count = sizes[split_pos].row_count; splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row}); cur_pos = split_pos; cur_cumulative_size = sizes[split_pos].size_bytes; } } // print_cumulative_row_info(sizes, "adjusted", splits); return splits; } /** * @brief Given a set of pages that have had their sizes computed by nesting level and * a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing * a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes. * * @param pages All pages in the file * @param id Additional intermediate information required to process the pages * @param num_rows Total number of rows to read * @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns * @param stream CUDA stream to use, default 0 */ std::vector<gpu::chunk_read_info> compute_splits(hostdevice_vector<gpu::PageInfo>& pages, gpu::chunk_intermediate_data const& id, size_t num_rows, size_t chunk_read_limit, rmm::cuda_stream_view stream) { auto const& page_keys = id.page_keys; auto const& page_index = id.page_index; // generate cumulative row counts and sizes rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), stream); // convert PageInfo to cumulative_row_info auto page_input = thrust::make_transform_iterator(page_index.begin(), get_cumulative_row_info{pages.device_ptr()}); thrust::inclusive_scan_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), page_input, c_info.begin(), thrust::equal_to{}, cumulative_row_sum{}); // print_cumulative_page_info(pages, page_index, c_info, stream); // sort by row count rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, stream}; thrust::sort(rmm::exec_policy(stream), c_info_sorted.begin(), c_info_sorted.end(), [] __device__(cumulative_row_info const& a, cumulative_row_info const& b) { return a.row_count < b.row_count; }); // std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size()); // CUDF_CUDA_TRY(cudaMemcpy(h_c_info_sorted.data(), // c_info_sorted.data(), // sizeof(cumulative_row_info) * c_info_sorted.size(), // cudaMemcpyDefault)); // print_cumulative_row_info(h_c_info_sorted, "raw"); // generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per // key rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, stream); auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), thrust::make_constant_iterator(1), thrust::make_discard_iterator(), key_offsets.begin()) .second; size_t const num_unique_keys = key_offsets_end - key_offsets.begin(); thrust::exclusive_scan( rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin()); // adjust the cumulative info such that for each row count, the size includes any pages that span // that row count. this is so that if we have this case: // page row counts // Column A: 0 <----> 100 <----> 200 // Column B: 0 <---------------> 200 <--------> 400 // | // if we decide to split at row 100, we don't really know the actual amount of bytes in column B // at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that // page. 
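// Numeric illustration (hypothetical sizes) of the adjustment computed below: if column A's
// first page (rows 0-100) accounts for 1MB and column B's first page (rows 0-200) for 3MB,
// the adjusted entry at row_count == 100 is charged 1MB + 3MB = 4MB, because splitting at
// row 100 would still require decoding column B's entire 200-row page.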
// rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), stream); thrust::transform(rmm::exec_policy(stream), c_info_sorted.begin(), c_info_sorted.end(), aggregated_info.begin(), row_total_size{c_info.data(), key_offsets.data(), num_unique_keys}); // bring back to the cpu std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size()); CUDF_CUDA_TRY(cudaMemcpyAsync(h_aggregated_info.data(), aggregated_info.data(), sizeof(cumulative_row_info) * c_info.size(), cudaMemcpyDefault, stream.value())); stream.synchronize(); return find_splits(h_aggregated_info, num_rows, chunk_read_limit); } struct get_page_chunk_idx { __device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; } }; struct get_page_num_rows { __device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; } }; struct get_page_schema { __device__ size_type operator()(gpu::PageInfo const& page) { return page.src_col_schema; } }; struct input_col_info { int const schema_idx; size_type const nesting_depth; }; /** * @brief Converts a 1-dimensional index into page, depth and column indices used in * allocate_columns to compute columns sizes. * * The input index will iterate through pages, nesting depth and column indices in that order. */ struct reduction_indices { size_t const page_idx; size_type const depth_idx; size_type const col_idx; __device__ reduction_indices(size_t index_, size_type max_depth_, size_t num_pages_) : page_idx(index_ % num_pages_), depth_idx((index_ / num_pages_) % max_depth_), col_idx(index_ / (max_depth_ * num_pages_)) { } }; /** * @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema. */ struct get_page_nesting_size { input_col_info const* const input_cols; size_type const max_depth; size_t const num_pages; gpu::PageInfo const* const pages; int const* page_indices; __device__ size_type operator()(size_t index) const { auto const indices = reduction_indices{index, max_depth, num_pages}; auto const& page = pages[page_indices[indices.page_idx]]; if (page.src_col_schema != input_cols[indices.col_idx].schema_idx || page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return 0; } return page.nesting[indices.depth_idx].batch_size; } }; struct get_reduction_key { size_t const num_pages; __device__ size_t operator()(size_t index) const { return index / num_pages; } }; /** * @brief Writes to the chunk_row field of the PageInfo struct. */ struct chunk_row_output_iter { gpu::PageInfo* p; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; __host__ __device__ chunk_row_output_iter operator+(int i) { return chunk_row_output_iter{p + i}; } __host__ __device__ void operator++() { p++; } __device__ reference operator[](int i) { return p[i].chunk_row; } __device__ reference operator*() { return p->chunk_row; } }; /** * @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema. 
*/ struct start_offset_output_iterator { gpu::PageInfo const* pages; int const* page_indices; size_t cur_index; input_col_info const* input_cols; size_type max_depth; size_t num_pages; int empty = 0; using value_type = size_type; using difference_type = size_type; using pointer = size_type*; using reference = size_type&; using iterator_category = thrust::output_device_iterator_tag; constexpr void operator=(start_offset_output_iterator const& other) { pages = other.pages; page_indices = other.page_indices; cur_index = other.cur_index; input_cols = other.input_cols; max_depth = other.max_depth; num_pages = other.num_pages; } constexpr start_offset_output_iterator operator+(size_t i) { return start_offset_output_iterator{ pages, page_indices, cur_index + i, input_cols, max_depth, num_pages}; } constexpr void operator++() { cur_index++; } __device__ reference operator[](size_t i) { return dereference(cur_index + i); } __device__ reference operator*() { return dereference(cur_index); } private: __device__ reference dereference(size_t index) { auto const indices = reduction_indices{index, max_depth, num_pages}; gpu::PageInfo const& p = pages[page_indices[indices.page_idx]]; if (p.src_col_schema != input_cols[indices.col_idx].schema_idx || p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) { return empty; } return p.nesting_decode[indices.depth_idx].page_start_value; } }; struct flat_column_num_rows { gpu::PageInfo const* pages; gpu::ColumnChunkDesc const* chunks; __device__ size_type operator()(size_type pindex) const { gpu::PageInfo const& page = pages[pindex]; // ignore dictionary pages and pages belonging to any column containing repetition (lists) if ((page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) || (chunks[page.chunk_idx].max_level[gpu::level_type::REPETITION] > 0)) { return 0; } return page.num_rows; } }; struct row_counts_nonzero { __device__ bool operator()(size_type count) const { return count > 0; } }; struct row_counts_different { size_type const expected; __device__ bool operator()(size_type count) const { return (count != 0) && (count != expected); } }; /** * @brief Detect malformed parquet input data. * * We have seen cases where parquet files can be oddly malformed. This function specifically * detects one case in particular: * * - When you have a file containing N rows * - For some reason, the sum total of the number of rows over all pages for a given column * is != N * * @param pages All pages to be decoded * @param chunks Chunk data * @param page_keys Keys (schema id) associated with each page, sorted by column * @param page_index Page indices for iteration, sorted by column * @param expected_row_count Expected row count, if applicable * @param stream CUDA stream used for device memory operations and kernel launches */ void detect_malformed_pages(hostdevice_vector<gpu::PageInfo>& pages, hostdevice_vector<gpu::ColumnChunkDesc> const& chunks, device_span<const int> page_keys, device_span<const int> page_index, std::optional<size_t> expected_row_count, rmm::cuda_stream_view stream) { // sum row counts for all non-dictionary, non-list columns. 
other columns will be indicated as 0 rmm::device_uvector<size_type> row_counts(pages.size(), stream); // worst case: num keys == num pages auto const size_iter = thrust::make_transform_iterator( page_index.begin(), flat_column_num_rows{pages.device_ptr(), chunks.device_ptr()}); auto const row_counts_begin = row_counts.begin(); auto const row_counts_end = thrust::reduce_by_key(rmm::exec_policy(stream), page_keys.begin(), page_keys.end(), size_iter, thrust::make_discard_iterator(), row_counts_begin) .second; // make sure all non-zero row counts are the same rmm::device_uvector<size_type> compacted_row_counts(pages.size(), stream); auto const compacted_row_counts_begin = compacted_row_counts.begin(); auto const compacted_row_counts_end = thrust::copy_if(rmm::exec_policy(stream), row_counts_begin, row_counts_end, compacted_row_counts_begin, row_counts_nonzero{}); if (compacted_row_counts_end != compacted_row_counts_begin) { size_t const found_row_count = static_cast<size_t>(compacted_row_counts.element(0, stream)); // if we somehow don't match the expected row count from the row groups themselves if (expected_row_count.has_value()) { CUDF_EXPECTS(expected_row_count.value() == found_row_count, "Encountered malformed parquet page data (unexpected row count in page data)"); } // all non-zero row counts must be the same auto const chk = thrust::count_if(rmm::exec_policy(stream), compacted_row_counts_begin, compacted_row_counts_end, row_counts_different{static_cast<size_type>(found_row_count)}); CUDF_EXPECTS(chk == 0, "Encountered malformed parquet page data (row count mismatch in page data)"); } } } // anonymous namespace void reader::impl::preprocess_pages(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds, size_t chunk_read_limit) { auto& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; // compute page ordering. // // ordering of pages is by input column schema, repeated across row groups. so // if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like // // 1, 1, 2, 2, 3, 3 // // However, if we had more than one row group, the pattern would be // // 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3 // ^ row group 0 | // ^ row group 1 // // To process pages by key (exclusive_scan_by_key, reduce_by_key, etc), the ordering we actually // want is // // 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 // // We also need to preserve key-relative page ordering, so we need to use a stable sort. rmm::device_uvector<int> page_keys(pages.size(), _stream); rmm::device_uvector<int> page_index(pages.size(), _stream); { thrust::transform(rmm::exec_policy(_stream), pages.device_ptr(), pages.device_ptr() + pages.size(), page_keys.begin(), get_page_schema{}); thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end()); thrust::stable_sort_by_key(rmm::exec_policy(_stream), page_keys.begin(), page_keys.end(), page_index.begin(), thrust::less<int>()); } // detect malformed columns. // - we have seen some cases in the wild where we have a row group containing N // rows, but the total number of rows in the pages for column X is != N. while it // is possible to load this by just capping the number of rows read, we cannot tell // which rows are invalid so we may be returning bad data. in addition, this mismatch // confuses the chunked reader detect_malformed_pages(pages, chunks, page_keys, page_index, uses_custom_row_bounds ? 
std::nullopt : std::make_optional(num_rows), _stream); // iterate over all input columns and determine if they contain lists so we can further // preprocess them. bool has_lists = false; for (size_t idx = 0; idx < _input_columns.size(); idx++) { auto const& input_col = _input_columns[idx]; size_t const max_depth = input_col.nesting_depth(); auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < max_depth; l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the // data computed during gpu::ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; break; } } if (has_lists) { break; } } // generate string dict indices if necessary { auto is_dict_chunk = [](const gpu::ColumnChunkDesc& chunk) { return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_input_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements _chunk_itm_data.str_dict_index = cudf::detail::make_zeroed_device_uvector_async<string_index_pair>( total_str_dict_indexes, _stream, rmm::mr::get_current_device_resource()); // Update chunks with pointers to string dict indices for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { input_column_info const& input_col = _input_columns[chunks[c].src_col_index]; CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema, "Column/page schema index mismatch"); if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs; str_ofs += pages[page_count].num_input_values; } // column_data_base will always point to leaf data, even for nested types. page_count += chunks[c].max_num_pages; } if (total_str_dict_indexes > 0) { chunks.host_to_device(_stream); gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream); } } // intermediate data we will need for further chunked reads if (has_lists || chunk_read_limit > 0) { // computes: // PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into // account), not just the number of values. PageNestingInfo::size for each level of nesting, for // each page. // // we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen // if: // - user has passed custom row bounds // - we will be doing a chunked read gpu::ComputePageSizes(pages, chunks, 0, // 0-max size_t. process all possible rows std::numeric_limits<size_t>::max(), true, // compute num_rows chunk_read_limit > 0, // compute string sizes _file_itm_data.level_type_size, _stream); // computes: // PageInfo::chunk_row (the absolute start row index) for all pages // Note: this is doing some redundant work for pages in flat hierarchies. chunk_row has already // been computed during header decoding. the overall amount of work here is very small though. 
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{}); auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{}); thrust::exclusive_scan_by_key(rmm::exec_policy(_stream), key_input, key_input + pages.size(), page_input, chunk_row_output_iter{pages.device_ptr()}); // preserve page ordering data _chunk_itm_data.page_keys = std::move(page_keys); _chunk_itm_data.page_index = std::move(page_index); // retrieve pages back pages.device_to_host(_stream, true); // print_pages(pages, _stream); } // compute splits if necessary. otherwise return a single split representing // the whole file. _chunk_read_info = chunk_read_limit > 0 ? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream) : std::vector<gpu::chunk_read_info>{{skip_rows, num_rows}}; } void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds) { auto const& chunks = _file_itm_data.chunks; auto& pages = _file_itm_data.pages_info; // Should not reach here if there is no page data. CUDF_EXPECTS(pages.size() > 0, "There is no page to parse"); // computes: // PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into // account. PageInfo::skipped_values, which tells us where to start decoding in the input to // respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds // is set (if the user has specified artificial bounds). if (uses_custom_row_bounds) { gpu::ComputePageSizes(pages, chunks, skip_rows, num_rows, false, // num_rows is already computed false, // no need to compute string sizes _file_itm_data.level_type_size, _stream); // print_pages(pages, _stream); } // iterate over all input columns and allocate any associated output // buffers if they are not part of a list hierarchy. mark down // if we have any list columns that need further processing. bool has_lists = false; for (size_t idx = 0; idx < _input_columns.size(); idx++) { auto const& input_col = _input_columns[idx]; size_t const max_depth = input_col.nesting_depth(); auto* cols = &_output_buffers; for (size_t l_idx = 0; l_idx < max_depth; l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this has a list parent, we have to get column sizes from the // data computed during gpu::ComputePageSizes if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) { has_lists = true; } // if we haven't already processed this column because it is part of a struct hierarchy else if (out_buf.size == 0) { // add 1 for the offset if this is a list column out_buf.create( out_buf.type.id() == type_id::LIST && l_idx < max_depth ? 
num_rows + 1 : num_rows, _stream, _mr); } } } // compute output column sizes by examining the pages of the -input- columns if (has_lists) { auto& page_index = _chunk_itm_data.page_index; std::vector<input_col_info> h_cols_info; h_cols_info.reserve(_input_columns.size()); std::transform(_input_columns.cbegin(), _input_columns.cend(), std::back_inserter(h_cols_info), [](auto& col) -> input_col_info { return {col.schema_idx, static_cast<size_type>(col.nesting_depth())}; }); auto const max_depth = (*std::max_element(h_cols_info.cbegin(), h_cols_info.cend(), [](auto& l, auto& r) { return l.nesting_depth < r.nesting_depth; })) .nesting_depth; auto const d_cols_info = cudf::detail::make_device_uvector_async( h_cols_info, _stream, rmm::mr::get_current_device_resource()); auto const num_keys = _input_columns.size() * max_depth * pages.size(); // size iterator. indexes pages by sorted order rmm::device_uvector<size_type> size_input{num_keys, _stream}; thrust::transform( rmm::exec_policy(_stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(num_keys), size_input.begin(), get_page_nesting_size{ d_cols_info.data(), max_depth, pages.size(), pages.device_ptr(), page_index.begin()}); auto const reduction_keys = cudf::detail::make_counting_transform_iterator(0, get_reduction_key{pages.size()}); hostdevice_vector<size_t> sizes{_input_columns.size() * max_depth, _stream}; // find the size of each column thrust::reduce_by_key(rmm::exec_policy(_stream), reduction_keys, reduction_keys + num_keys, size_input.cbegin(), thrust::make_discard_iterator(), sizes.d_begin()); // for nested hierarchies, compute per-page start offset thrust::exclusive_scan_by_key( rmm::exec_policy(_stream), reduction_keys, reduction_keys + num_keys, size_input.cbegin(), start_offset_output_iterator{ pages.device_ptr(), page_index.begin(), 0, d_cols_info.data(), max_depth, pages.size()}); sizes.device_to_host(_stream, true); for (size_type idx = 0; idx < static_cast<size_type>(_input_columns.size()); idx++) { auto const& input_col = _input_columns[idx]; auto* cols = &_output_buffers; for (size_type l_idx = 0; l_idx < static_cast<size_type>(input_col.nesting_depth()); l_idx++) { auto& out_buf = (*cols)[input_col.nesting[l_idx]]; cols = &out_buf.children; // if this buffer is part of a list hierarchy, we need to determine it's // final size and allocate it here. // // for struct columns, higher levels of the output columns are shared between input // columns. so don't compute any given level more than once. if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) { auto size = sizes[(idx * max_depth) + l_idx]; // if this is a list column add 1 for non-leaf levels for the terminating offset if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; } // allocate out_buf.create(size, _stream, _mr); } } } } } } // namespace cudf::io::detail::parquet
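// A minimal host-side C++ sketch (not part of cuDF) of the check that
// detect_malformed_pages above performs on the device with reduce_by_key,
// copy_if and count_if: per-column row-count totals must all agree, and must
// match the expected row count when one is supplied. PageStub and
// validate_row_counts are illustrative names only.
#include <cstdio>
#include <map>
#include <optional>
#include <stdexcept>
#include <vector>

struct PageStub { int column_key; long num_rows; };  // dictionary/list pages assumed already filtered out

void validate_row_counts(std::vector<PageStub> const& pages, std::optional<long> expected)
{
  std::map<int, long> per_column;                    // host-side stand-in for reduce_by_key
  for (auto const& p : pages) per_column[p.column_key] += p.num_rows;

  std::optional<long> found;                         // host-side stand-in for copy_if + count_if
  for (auto const& [key, total] : per_column) {
    if (total == 0) continue;                        // columns with no countable pages are ignored
    if (!found) found = total;
    if (total != *found) throw std::runtime_error("row count mismatch in page data");
  }
  if (found && expected && *expected != *found)
    throw std::runtime_error("unexpected row count in page data");
}

int main()
{
  validate_row_counts({{0, 10}, {0, 5}, {1, 15}}, 15);   // both columns total 15 rows: passes
  std::puts("row counts consistent");
}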
31f9f58c6f2279a67016cbdaae52e30fb7823b2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // sweep-tt-multistart.c - using VELOCITYBOX and FLOATBOX // vim: set tabstop=2 softtabstop=2 shiftwidth=2 expandtab : //////////////////////////////////////////////////////////////////////////////// /********************************************************************************/ /* Given a velocity field v[nx][ny][nz] for a set of points (i,j,k) (where */ /* 0 <= i < nx, 0 <= j < ny, 0 <= k < nz) layed out on a grid with delta unit */ /* distance, compute the minimum travel time, tt[nx][ny][nz][numstart], for all */ /* points to the numstart starting points. The program is called as follows: */ /* */ /* sweep-tt-multistart vfile fsfile startfile */ /* */ // vfile is the velocity field file and has the .vbox format. /* */ /* fsfile is the forward star offset file and has the format: */ /* */ /* starsize */ /* oi oj ok for every forward star offset (oi,oj,ok) */ /* */ /* startfile contains starting points and has the format: */ /* */ /* numstart */ /* si sj sk for every starting point */ /* */ /* The program writes to "output.tt" the following: */ /* */ /* nx ny nz */ /* tt[i][j][k] for every point (i,j,k) in row-major order */ /* */ /* for every starting point. */ /* (Note, the program currently exits before this is done.) */ /********************************************************************************/ #include "iovelocity.h" #include "timing.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #define FSRADIUSMAX 7 /* maximum radius forward star */ #define FSMAX 818 /* maximum # of points in a forward star */ #define MODELMAX 250 /* maximum model dimension in X,Y,Z */ #define STARTMAX 4 /* maximum starting points */ #define GRIDX 256 #define GRIDY 256 #define GRIDZ 1 #define BLOCKX 1 #define BLOCKY 1 #define BLOCKZ 64 #define DEVNUM 3 const int starSplit[4] = {0, 330, 550, 818}; struct FS { /* forward start offset */ int i, j, k; /* point coordinates */ float d; /* distance to star center (0,0,0)*/ }; struct MODEL { /* model point */ float v; /* velocity */ float tt[STARTMAX]; /* travel time for starting points */ }; struct START { /* starting point */ int i, j , k; /* point coordinates */ }; int changed[STARTMAX]; struct FS fs[FSMAX]; __constant__ struct FS dc_fs[FSMAX]; struct START start[STARTMAX]; struct VELOCITYBOX vbox; // stores JUST velocities struct FLOATBOX ttboxes[STARTMAX]; // stores JUST travel times, one volume per starting point void cudaRun(int, int); __global__ void cudaWorker( int d_nx, int d_ny, int d_nz, int d_starstart, int d_starend, struct FS *pd_fs, float *pd_vboxflat, float *pd_ttboxflat, long *pd_anychange ); __device__ int sweepXYZ( int nx, int ny, int nz, int starstart, int starstop, struct FS *fs, float *vboxflat, float *ttboxflat ); int main(int argc, char* argv[]) { int i, j, k, nx, ny, nz, s; int numradius, starsize, numstart; int fsindex[FSRADIUSMAX]; float delta; FILE *fsfile, *ttfile, *startfile; const char *velocity_model_file = argv[1]; /* open velocity model file */ printf( "Loading velocity model file: %s...", velocity_model_file ); fflush( stdout ); //if( !vboxloadbinary( &vbox, velocity_model_file ) ) { if( !vboxloadtext( &vbox, velocity_model_file ) ) { printf( "Cannot open velocity model file: %s\n", velocity_model_file ); exit(1); } nx = vbox.box.nx; ny = vbox.box.ny; nz = vbox.box.nz; printf( " done.\n" ); fflush( stdout ); printf( "Velocity model dimensions: %d x 
%d x %d\n", nx, ny, nz ); /* open forward star offset file */ fsfile = fopen(argv[2],"r"); if(fsfile == NULL) { printf("Cannot open forward star offset file: %s\n", argv[2]); exit(1); } printf("Forward star offset file: %s\n", argv[2]); /* open file with starting points */ startfile = fopen(argv[3],"r"); if(startfile == NULL) { printf("Cannot open starting points file: %s\n", argv[4]); exit(1); } printf("Starting points file: %s\n", argv[3]); /* get delta */ delta = 10.0; printf("Delta: %f\n", delta); /* read forward star offsets */ starsize = 0; fscanf(fsfile, "%i", &starsize); printf("Forward star size: %d\n", starsize); for (i=0; i<FSRADIUSMAX; i++) { fsindex[i] = 0; } numradius = 0; for (i=0; i<starsize; i++) { fscanf(fsfile, "%i %i %i", &fs[i].i, &fs[i].j, &fs[i].k); fs[i].d = sqrt(fs[i].i*fs[i].i + fs[i].j*fs[i].j + fs[i].k*fs[i].k); if ((numradius+1) < fs[i].d) { fsindex[numradius] = i; numradius++; } fs[i].d = delta * fs[i].d; } printf("Forward star offsets read\n"); for (i=0; i<FSRADIUSMAX; i++) { printf("numradius: %d, fsindex[%d]: %d\n", numradius, i, fsindex[i]); } /* read starting points */ fscanf(startfile, "%i", &numstart); // initialize travel times for all starting points for( s = 0; s < numstart; s++ ) { // prepare travel time volumes boxalloc( &ttboxes[s], nx, ny, nz ); boxsetall( ttboxes[s], INFINITY ); // set the starting point to have a travel time of 0 fscanf( startfile, "%i %i %i", &i, &j, &k ); boxput( ttboxes[s], i, j, k, 0 ); printf( "starting point %d: %d %d %d\n", s, i, j, k ); start[s].i = i; start[s].j = j; start[s].k = k; } printf("Starting points read\n"); int nDevices; hipGetDeviceCount(&nDevices); printf("device: %d\n", nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } cudaRun(numstart, starsize); /* print travel times */ ttfile = fopen("output.tt","w"); if(ttfile == NULL) { printf("Can not open travel time output file: %s\n", "output.tt"); exit(1); } fprintf(ttfile, "%d %d %d\n", nx, ny, nz); for (s=0; s<numstart; s++) { fprintf(ttfile, "starting point: %d\n", s); for (i=0; i<nx; i++) { for (j=0; j<ny; j++) { for (k=0; k<nz; k++) { /* use %g for doubles */ fprintf(ttfile, "travel time for (%d,%d,%d): %f %d %d %d\n", i, j, k, boxget( ttboxes[s], i, j, k ), 0, 0, 0 ); } } } } } /* main */ void cudaRun( int numstart, int starsize ) { //constants const int blkNum = GRIDX * GRIDY * GRIDZ; const int blkSize = BLOCKX * BLOCKY * BLOCKZ; const int tNum = blkNum * blkSize; //host variables long anychange[DEVNUM][tNum]; int i, j, nx = vbox.box.nx, ny = vbox.box.ny, nz = vbox.box.nz, devIdx = 0, devNum = DEVNUM; int nCells = nx * ny * nz; size_t flatbytes = (size_t)nCells * sizeof(float); float ttflatbuffs[DEVNUM][nCells]; hipError_t err; //Cuda variables dim3 gridDim(GRIDX,GRIDY,GRIDZ); dim3 blockDim(BLOCKX,BLOCKY,BLOCKZ); float *pd_vboxflat[DEVNUM]; float *ppd_ttboxflat[DEVNUM][STARTMAX]; long *pd_anychange[DEVNUM]; hipStream_t streams[DEVNUM]; //allocate device memory for(devIdx = 0; devIdx < devNum; devIdx++){ hipSetDevice(devIdx); err = hipMalloc((void **)&pd_vboxflat[devIdx], flatbytes); if(err != hipSuccess) printf("pd_vboxflat malloc error\n"); err = hipMalloc((void **)&pd_anychange[devIdx], 
sizeof(long) * tNum); if(err != hipSuccess) printf( "pd_anychange malloc error\n"); for(i=0; i<STARTMAX; i++){ err = hipMalloc((void **)&ppd_ttboxflat[devIdx][i], flatbytes); if(err != hipSuccess) printf("ppd_ttboxflat malloc error\n"); } } //lock up host memory for async transfer hipHostRegister(fs, sizeof(fs), hipHostRegisterDefault); hipHostRegister(start, sizeof(start), hipHostRegisterDefault); hipHostRegister(vbox.box.flat, flatbytes, hipHostRegisterDefault); for(i=0; i<STARTMAX; i++) hipHostRegister(ttboxes[i].flat, flatbytes, hipHostRegisterDefault); //async copy memory from host to device for(devIdx = 0; devIdx < devNum; devIdx++){ hipSetDevice(devIdx); hipStreamCreate(&streams[devIdx]); //copy fs to device err = hipMemcpyToSymbolAsync(dc_fs, fs, sizeof(fs), 0, hipMemcpyHostToDevice, streams[devIdx]); if(err != hipSuccess) printf("dc_fs copy error\n"); printf("1\n"); //copy velosity box to device err = hipMemcpyAsync(pd_vboxflat[devIdx], vbox.box.flat, flatbytes, hipMemcpyHostToDevice, streams[devIdx]); if(err != hipSuccess) printf( "pd_vboxflat copy error\n" ); printf( "2\n" ); //copy travel time boxes to device for(i=0; i<STARTMAX; i++){ err = hipMemcpyAsync(ppd_ttboxflat[devIdx][i], ttboxes[i].flat, flatbytes, hipMemcpyHostToDevice, streams[devIdx]); if(err != hipSuccess) printf( "ppd_ttboxflat %d copy error\n", i ); } printf("3\n"); } hipStreamSynchronize(0); //run algorithm double tSweep = 0.0, tChangeTrans = 0.0, tSum = 0.0, tMerge = 0.0, tBoxTrans = 0.0, tTotal = 0.0; for(i=0; i<numstart; i++){ long sweepNum = 0, changeSum = 1; while (changeSum) {//run until no changes changeSum = 0; sweepNum++; //run splited forward stars on different devices reset_and_start_timer(); for(devIdx=0; devIdx<devNum; devIdx++){ hipSetDevice(devIdx); err = hipMemset(pd_anychange[devIdx], 0, sizeof(long) * tNum); if(err != hipSuccess) printf( "pd_anychange memset error\n"); hipLaunchKernelGGL(( cudaWorker), dim3(gridDim),dim3(blockDim), 0, 0, nx, ny, nz, starSplit[devIdx], starSplit[devIdx+1]-1, //Note: change the range to the original starsize only reduce 5ms time. 
dc_fs, pd_vboxflat[devIdx], ppd_ttboxflat[devIdx][i], pd_anychange[devIdx] ); } hipStreamSynchronize(0); //sync all devices tSweep = get_elapsed_msec(); if(err != hipSuccess) //check error printf(" hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err)); //pull back and check changes reset_and_start_timer(); for(devIdx=0; devIdx<devNum; devIdx++){ hipSetDevice(devIdx); err = hipMemcpyAsync(anychange[devIdx], pd_anychange[devIdx], sizeof(long) * tNum, hipMemcpyDeviceToHost, streams[devIdx]); if(err != hipSuccess) printf("anychange copy error: %d\n", err); } hipStreamSynchronize(0); //sync all devices tChangeTrans = get_elapsed_msec(); reset_and_start_timer(); for(devIdx=0; devIdx<devNum; devIdx++) for(j = 0; j < tNum; j++) changeSum += anychange[devIdx][j]; tSum = get_elapsed_msec(); //copy travel time back from all devices and merge them and then put them back reset_and_start_timer(); for(devIdx = 0; devIdx < devNum; devIdx++){ hipSetDevice(devIdx); err = hipMemcpyAsync(ttflatbuffs[devIdx], ppd_ttboxflat[devIdx][i], flatbytes, hipMemcpyDeviceToHost, streams[devIdx]); if(err != hipSuccess) printf( "ppd_ttboxflat %d copy error\n", i ); } hipStreamSynchronize(0); //sync all devices tBoxTrans = get_elapsed_msec(); reset_and_start_timer(); float res; for(j = 0; j < nCells; j++){ res = INFINITY; for(devIdx = 0; devIdx < devNum; devIdx++) res = fminf(res, ttflatbuffs[devIdx][j]); ttboxes[i].flat[j] = res; } tMerge = get_elapsed_msec(); for(devIdx = 0; devIdx < devNum; devIdx++){ hipSetDevice(devIdx); err = hipMemcpyAsync(ppd_ttboxflat[devIdx][i], ttboxes[i].flat, flatbytes, hipMemcpyHostToDevice, streams[devIdx]); if(err != hipSuccess) printf( "ppd_ttboxflat %d copy error\n", i ); } hipStreamSynchronize(0); //sync all devices tTotal = tSweep + tChangeTrans + tSum + tBoxTrans + tMerge; printf(" start point: %d, sweep %d: %d changes, sweep %g, change trans %g\n\ sum %g, box trans %g, merg %g, total %g\n", i, sweepNum, changeSum, tSweep, tChangeTrans, tSum, tBoxTrans, tMerge, tTotal); } memcpy( ttboxes[i].flat, ttflatbuffs[0], flatbytes); } printf("6\n"); for(devIdx=0; devIdx<devNum; devIdx++){ hipFree(pd_vboxflat[devIdx]); hipFree(pd_anychange[devIdx]); for(i=0; i<STARTMAX; i++) err = hipFree(ppd_ttboxflat[devIdx][i]); hipStreamDestroy(streams[devIdx]); } } __global__ void cudaWorker( int d_nx, int d_ny, int d_nz, int d_starstart, int d_starend, struct FS *pd_fs, float *pd_vboxflat, float *pd_ttboxflat, long *pd_anychange ) { //int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; int d_blkid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int d_glbtid = d_blkid * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; //int blkSize = blockDim.x * blockDim.y * blockDim.z; pd_anychange[d_glbtid] = sweepXYZ( d_nx, d_ny, d_nz, d_starstart, d_starend, dc_fs, pd_vboxflat, pd_ttboxflat ); } __device__ int sweepXYZ( int nx, int ny, int nz, int starstart, int starstop, struct FS *fs, float *vboxflat, float *ttboxflat ) { int i, j, k, l, oi, oj, ok, iIdx, oIdx; float delay = 0.0, tt = 0.0, tto = 0.0, ttd = 0.0, ttod = 0.0; int sx = nz * ny; int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; __shared__ int change; if(d_blktid == 0) change = 0; __syncthreads(); i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; k = blockIdx.z * blockDim.z + threadIdx.z; if(i >= nx || j >= ny || k >= 
nz) return 0; for (l=starstart; l<starstop; l++) { /* find point in forward star based on offsets */ oi = i+fs[l].i; oj = j+fs[l].j; ok = k+fs[l].k; /* if (oi,oj,ok) is outside the boundaries, then skip */ if ((oi < 0) || (oi > nx-1) || (oj < 0) || (oj > ny-1) || (ok < 0) || (ok > nz-1)) { continue; } //pre-compute all the needed values iIdx = k+nz*j+i*sx; oIdx = ok+nz*oj+oi*sx; delay = fs[l].d * (vboxflat[iIdx] + vboxflat[oIdx]) / 2.0; tt = ttboxflat[iIdx]; tto = ttboxflat[oIdx]; ttd = tt + delay; ttod = tto + delay; //if the difference between two values is greater than delay //do value switches using pre-calculated values. if(fabs(tt-tto) > delay){ ttboxflat[iIdx] = fminf(tt, ttod); ttboxflat[oIdx] = fminf(tto, ttd); if(change == 0 && (ttod < tt || ttd < tto)) change = 1; } } return(change); } /* end sweepXYZ */
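// A minimal host-side sketch (not part of the file above) of the forward-star
// partitioning that cudaRun uses: starSplit carves the 818 offsets into one
// range per device. sweepXYZ treats its upper bound as exclusive (l < starstop),
// so passing starSplit[devIdx+1]-1 as that bound, as the launch above appears
// to do, leaves the last offset of each chunk unswept; half-open [begin, end)
// ranges avoid the off-by-one.
#include <cstdio>

int main()
{
  const int starSplit[4] = {0, 330, 550, 818};   // same split table as above
  int covered = 0;
  for (int dev = 0; dev < 3; ++dev) {
    const int begin = starSplit[dev];
    const int end   = starSplit[dev + 1];        // exclusive bound, no -1
    covered += end - begin;
    std::printf("device %d sweeps offsets [%d, %d)\n", dev, begin, end);
  }
  std::printf("offsets covered: %d of 818\n", covered);   // prints 818
}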
31f9f58c6f2279a67016cbdaae52e30fb7823b2c.cu
//////////////////////////////////////////////////////////////////////////////// // sweep-tt-multistart.c - using VELOCITYBOX and FLOATBOX // vim: set tabstop=2 softtabstop=2 shiftwidth=2 expandtab : //////////////////////////////////////////////////////////////////////////////// /********************************************************************************/ /* Given a velocity field v[nx][ny][nz] for a set of points (i,j,k) (where */ /* 0 <= i < nx, 0 <= j < ny, 0 <= k < nz) layed out on a grid with delta unit */ /* distance, compute the minimum travel time, tt[nx][ny][nz][numstart], for all */ /* points to the numstart starting points. The program is called as follows: */ /* */ /* sweep-tt-multistart vfile fsfile startfile */ /* */ // vfile is the velocity field file and has the .vbox format. /* */ /* fsfile is the forward star offset file and has the format: */ /* */ /* starsize */ /* oi oj ok for every forward star offset (oi,oj,ok) */ /* */ /* startfile contains starting points and has the format: */ /* */ /* numstart */ /* si sj sk for every starting point */ /* */ /* The program writes to "output.tt" the following: */ /* */ /* nx ny nz */ /* tt[i][j][k] for every point (i,j,k) in row-major order */ /* */ /* for every starting point. */ /* (Note, the program currently exits before this is done.) */ /********************************************************************************/ #include "iovelocity.h" #include "timing.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #define FSRADIUSMAX 7 /* maximum radius forward star */ #define FSMAX 818 /* maximum # of points in a forward star */ #define MODELMAX 250 /* maximum model dimension in X,Y,Z */ #define STARTMAX 4 /* maximum starting points */ #define GRIDX 256 #define GRIDY 256 #define GRIDZ 1 #define BLOCKX 1 #define BLOCKY 1 #define BLOCKZ 64 #define DEVNUM 3 const int starSplit[4] = {0, 330, 550, 818}; struct FS { /* forward start offset */ int i, j, k; /* point coordinates */ float d; /* distance to star center (0,0,0)*/ }; struct MODEL { /* model point */ float v; /* velocity */ float tt[STARTMAX]; /* travel time for starting points */ }; struct START { /* starting point */ int i, j , k; /* point coordinates */ }; int changed[STARTMAX]; struct FS fs[FSMAX]; __constant__ struct FS dc_fs[FSMAX]; struct START start[STARTMAX]; struct VELOCITYBOX vbox; // stores JUST velocities struct FLOATBOX ttboxes[STARTMAX]; // stores JUST travel times, one volume per starting point void cudaRun(int, int); __global__ void cudaWorker( int d_nx, int d_ny, int d_nz, int d_starstart, int d_starend, struct FS *pd_fs, float *pd_vboxflat, float *pd_ttboxflat, long *pd_anychange ); __device__ int sweepXYZ( int nx, int ny, int nz, int starstart, int starstop, struct FS *fs, float *vboxflat, float *ttboxflat ); int main(int argc, char* argv[]) { int i, j, k, nx, ny, nz, s; int numradius, starsize, numstart; int fsindex[FSRADIUSMAX]; float delta; FILE *fsfile, *ttfile, *startfile; const char *velocity_model_file = argv[1]; /* open velocity model file */ printf( "Loading velocity model file: %s...", velocity_model_file ); fflush( stdout ); //if( !vboxloadbinary( &vbox, velocity_model_file ) ) { if( !vboxloadtext( &vbox, velocity_model_file ) ) { printf( "Cannot open velocity model file: %s\n", velocity_model_file ); exit(1); } nx = vbox.box.nx; ny = vbox.box.ny; nz = vbox.box.nz; printf( " done.\n" ); fflush( stdout ); printf( "Velocity model dimensions: %d x %d x %d\n", nx, ny, nz ); /* open forward star offset file */ fsfile = 
fopen(argv[2],"r"); if(fsfile == NULL) { printf("Cannot open forward star offset file: %s\n", argv[2]); exit(1); } printf("Forward star offset file: %s\n", argv[2]); /* open file with starting points */ startfile = fopen(argv[3],"r"); if(startfile == NULL) { printf("Cannot open starting points file: %s\n", argv[4]); exit(1); } printf("Starting points file: %s\n", argv[3]); /* get delta */ delta = 10.0; printf("Delta: %f\n", delta); /* read forward star offsets */ starsize = 0; fscanf(fsfile, "%i", &starsize); printf("Forward star size: %d\n", starsize); for (i=0; i<FSRADIUSMAX; i++) { fsindex[i] = 0; } numradius = 0; for (i=0; i<starsize; i++) { fscanf(fsfile, "%i %i %i", &fs[i].i, &fs[i].j, &fs[i].k); fs[i].d = sqrt(fs[i].i*fs[i].i + fs[i].j*fs[i].j + fs[i].k*fs[i].k); if ((numradius+1) < fs[i].d) { fsindex[numradius] = i; numradius++; } fs[i].d = delta * fs[i].d; } printf("Forward star offsets read\n"); for (i=0; i<FSRADIUSMAX; i++) { printf("numradius: %d, fsindex[%d]: %d\n", numradius, i, fsindex[i]); } /* read starting points */ fscanf(startfile, "%i", &numstart); // initialize travel times for all starting points for( s = 0; s < numstart; s++ ) { // prepare travel time volumes boxalloc( &ttboxes[s], nx, ny, nz ); boxsetall( ttboxes[s], INFINITY ); // set the starting point to have a travel time of 0 fscanf( startfile, "%i %i %i", &i, &j, &k ); boxput( ttboxes[s], i, j, k, 0 ); printf( "starting point %d: %d %d %d\n", s, i, j, k ); start[s].i = i; start[s].j = j; start[s].k = k; } printf("Starting points read\n"); int nDevices; cudaGetDeviceCount(&nDevices); printf("device: %d\n", nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } cudaRun(numstart, starsize); /* print travel times */ ttfile = fopen("output.tt","w"); if(ttfile == NULL) { printf("Can not open travel time output file: %s\n", "output.tt"); exit(1); } fprintf(ttfile, "%d %d %d\n", nx, ny, nz); for (s=0; s<numstart; s++) { fprintf(ttfile, "starting point: %d\n", s); for (i=0; i<nx; i++) { for (j=0; j<ny; j++) { for (k=0; k<nz; k++) { /* use %g for doubles */ fprintf(ttfile, "travel time for (%d,%d,%d): %f %d %d %d\n", i, j, k, boxget( ttboxes[s], i, j, k ), 0, 0, 0 ); } } } } } /* main */ void cudaRun( int numstart, int starsize ) { //constants const int blkNum = GRIDX * GRIDY * GRIDZ; const int blkSize = BLOCKX * BLOCKY * BLOCKZ; const int tNum = blkNum * blkSize; //host variables long anychange[DEVNUM][tNum]; int i, j, nx = vbox.box.nx, ny = vbox.box.ny, nz = vbox.box.nz, devIdx = 0, devNum = DEVNUM; int nCells = nx * ny * nz; size_t flatbytes = (size_t)nCells * sizeof(float); float ttflatbuffs[DEVNUM][nCells]; cudaError_t err; //Cuda variables dim3 gridDim(GRIDX,GRIDY,GRIDZ); dim3 blockDim(BLOCKX,BLOCKY,BLOCKZ); float *pd_vboxflat[DEVNUM]; float *ppd_ttboxflat[DEVNUM][STARTMAX]; long *pd_anychange[DEVNUM]; cudaStream_t streams[DEVNUM]; //allocate device memory for(devIdx = 0; devIdx < devNum; devIdx++){ cudaSetDevice(devIdx); err = cudaMalloc((void **)&pd_vboxflat[devIdx], flatbytes); if(err != cudaSuccess) printf("pd_vboxflat malloc error\n"); err = cudaMalloc((void **)&pd_anychange[devIdx], sizeof(long) * tNum); if(err != cudaSuccess) printf( "pd_anychange malloc 
error\n"); for(i=0; i<STARTMAX; i++){ err = cudaMalloc((void **)&ppd_ttboxflat[devIdx][i], flatbytes); if(err != cudaSuccess) printf("ppd_ttboxflat malloc error\n"); } } //lock up host memory for async transfer cudaHostRegister(fs, sizeof(fs), cudaHostRegisterDefault); cudaHostRegister(start, sizeof(start), cudaHostRegisterDefault); cudaHostRegister(vbox.box.flat, flatbytes, cudaHostRegisterDefault); for(i=0; i<STARTMAX; i++) cudaHostRegister(ttboxes[i].flat, flatbytes, cudaHostRegisterDefault); //async copy memory from host to device for(devIdx = 0; devIdx < devNum; devIdx++){ cudaSetDevice(devIdx); cudaStreamCreate(&streams[devIdx]); //copy fs to device err = cudaMemcpyToSymbolAsync(dc_fs, fs, sizeof(fs), 0, cudaMemcpyHostToDevice, streams[devIdx]); if(err != cudaSuccess) printf("dc_fs copy error\n"); printf("1\n"); //copy velosity box to device err = cudaMemcpyAsync(pd_vboxflat[devIdx], vbox.box.flat, flatbytes, cudaMemcpyHostToDevice, streams[devIdx]); if(err != cudaSuccess) printf( "pd_vboxflat copy error\n" ); printf( "2\n" ); //copy travel time boxes to device for(i=0; i<STARTMAX; i++){ err = cudaMemcpyAsync(ppd_ttboxflat[devIdx][i], ttboxes[i].flat, flatbytes, cudaMemcpyHostToDevice, streams[devIdx]); if(err != cudaSuccess) printf( "ppd_ttboxflat %d copy error\n", i ); } printf("3\n"); } cudaStreamSynchronize(0); //run algorithm double tSweep = 0.0, tChangeTrans = 0.0, tSum = 0.0, tMerge = 0.0, tBoxTrans = 0.0, tTotal = 0.0; for(i=0; i<numstart; i++){ long sweepNum = 0, changeSum = 1; while (changeSum) {//run until no changes changeSum = 0; sweepNum++; //run splited forward stars on different devices reset_and_start_timer(); for(devIdx=0; devIdx<devNum; devIdx++){ cudaSetDevice(devIdx); err = cudaMemset(pd_anychange[devIdx], 0, sizeof(long) * tNum); if(err != cudaSuccess) printf( "pd_anychange memset error\n"); cudaWorker<<<gridDim,blockDim>>>( nx, ny, nz, starSplit[devIdx], starSplit[devIdx+1]-1, //Note: change the range to the original starsize only reduce 5ms time. 
dc_fs, pd_vboxflat[devIdx], ppd_ttboxflat[devIdx][i], pd_anychange[devIdx] ); } cudaStreamSynchronize(0); //sync all devices tSweep = get_elapsed_msec(); if(err != cudaSuccess) //check error printf(" cudaGetLastError() returned %d: %s\n", err, cudaGetErrorString(err)); //pull back and check changes reset_and_start_timer(); for(devIdx=0; devIdx<devNum; devIdx++){ cudaSetDevice(devIdx); err = cudaMemcpyAsync(anychange[devIdx], pd_anychange[devIdx], sizeof(long) * tNum, cudaMemcpyDeviceToHost, streams[devIdx]); if(err != cudaSuccess) printf("anychange copy error: %d\n", err); } cudaStreamSynchronize(0); //sync all devices tChangeTrans = get_elapsed_msec(); reset_and_start_timer(); for(devIdx=0; devIdx<devNum; devIdx++) for(j = 0; j < tNum; j++) changeSum += anychange[devIdx][j]; tSum = get_elapsed_msec(); //copy travel time back from all devices and merge them and then put them back reset_and_start_timer(); for(devIdx = 0; devIdx < devNum; devIdx++){ cudaSetDevice(devIdx); err = cudaMemcpyAsync(ttflatbuffs[devIdx], ppd_ttboxflat[devIdx][i], flatbytes, cudaMemcpyDeviceToHost, streams[devIdx]); if(err != cudaSuccess) printf( "ppd_ttboxflat %d copy error\n", i ); } cudaStreamSynchronize(0); //sync all devices tBoxTrans = get_elapsed_msec(); reset_and_start_timer(); float res; for(j = 0; j < nCells; j++){ res = INFINITY; for(devIdx = 0; devIdx < devNum; devIdx++) res = fminf(res, ttflatbuffs[devIdx][j]); ttboxes[i].flat[j] = res; } tMerge = get_elapsed_msec(); for(devIdx = 0; devIdx < devNum; devIdx++){ cudaSetDevice(devIdx); err = cudaMemcpyAsync(ppd_ttboxflat[devIdx][i], ttboxes[i].flat, flatbytes, cudaMemcpyHostToDevice, streams[devIdx]); if(err != cudaSuccess) printf( "ppd_ttboxflat %d copy error\n", i ); } cudaStreamSynchronize(0); //sync all devices tTotal = tSweep + tChangeTrans + tSum + tBoxTrans + tMerge; printf(" start point: %d, sweep %d: %d changes, sweep %g, change trans %g\n\ sum %g, box trans %g, merg %g, total %g\n", i, sweepNum, changeSum, tSweep, tChangeTrans, tSum, tBoxTrans, tMerge, tTotal); } memcpy( ttboxes[i].flat, ttflatbuffs[0], flatbytes); } printf("6\n"); for(devIdx=0; devIdx<devNum; devIdx++){ cudaFree(pd_vboxflat[devIdx]); cudaFree(pd_anychange[devIdx]); for(i=0; i<STARTMAX; i++) err = cudaFree(ppd_ttboxflat[devIdx][i]); cudaStreamDestroy(streams[devIdx]); } } __global__ void cudaWorker( int d_nx, int d_ny, int d_nz, int d_starstart, int d_starend, struct FS *pd_fs, float *pd_vboxflat, float *pd_ttboxflat, long *pd_anychange ) { //int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; int d_blkid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int d_glbtid = d_blkid * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; //int blkSize = blockDim.x * blockDim.y * blockDim.z; pd_anychange[d_glbtid] = sweepXYZ( d_nx, d_ny, d_nz, d_starstart, d_starend, dc_fs, pd_vboxflat, pd_ttboxflat ); } __device__ int sweepXYZ( int nx, int ny, int nz, int starstart, int starstop, struct FS *fs, float *vboxflat, float *ttboxflat ) { int i, j, k, l, oi, oj, ok, iIdx, oIdx; float delay = 0.0, tt = 0.0, tto = 0.0, ttd = 0.0, ttod = 0.0; int sx = nz * ny; int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; __shared__ int change; if(d_blktid == 0) change = 0; __syncthreads(); i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; k = blockIdx.z * blockDim.z + threadIdx.z; if(i >= 
nx || j >= ny || k >= nz) return 0; for (l=starstart; l<starstop; l++) { /* find point in forward star based on offsets */ oi = i+fs[l].i; oj = j+fs[l].j; ok = k+fs[l].k; /* if (oi,oj,ok) is outside the boundaries, then skip */ if ((oi < 0) || (oi > nx-1) || (oj < 0) || (oj > ny-1) || (ok < 0) || (ok > nz-1)) { continue; } //pre-compute all the needed values iIdx = k+nz*j+i*sx; oIdx = ok+nz*oj+oi*sx; delay = fs[l].d * (vboxflat[iIdx] + vboxflat[oIdx]) / 2.0; tt = ttboxflat[iIdx]; tto = ttboxflat[oIdx]; ttd = tt + delay; ttod = tto + delay; //if the difference between two values is greater than delay //do value switches using pre-calculated values. if(fabs(tt-tto) > delay){ ttboxflat[iIdx] = fminf(tt, ttod); ttboxflat[oIdx] = fminf(tto, ttd); if(change == 0 && (ttod < tt || ttd < tto)) change = 1; } } return(change); } /* end sweepXYZ */
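// A small host-side sketch (illustrative only) of two ideas used above: the
// flat index k + nz*j + i*(nz*ny) with which sweepXYZ addresses the row-major
// (i,j,k) volume, and the per-cell minimum that cudaRun takes when it merges
// the travel-time buffers coming back from the different devices.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  const int nx = 4, ny = 3, nz = 2, sx = nz * ny;
  const int i = 2, j = 1, k = 1;
  const int idx = k + nz * j + i * sx;                              // flat index, as in sweepXYZ
  assert(idx / sx == i && (idx % sx) / nz == j && idx % nz == k);   // round-trips to (i,j,k)

  // merging per-device results: keep the smallest travel time for every cell
  std::vector<float> devA(nx * ny * nz, 5.0f), devB(nx * ny * nz, 3.0f), merged(nx * ny * nz);
  for (std::size_t c = 0; c < merged.size(); ++c)
    merged[c] = std::min(devA[c], devB[c]);
  assert(merged[idx] == 3.0f);
  return 0;
}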
e374c744afe93a1131832f9fab28c036165a32d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <array/NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Sorted segments ops implementations template <typename T, typename I> static bool segmentIndicesValidate_(NDArray* indices, NDArray& aexpected, NDArray& aoutput) { return true; } bool segmentIndicesValidate(sd::LaunchContext* context , NDArray* indices, NDArray& expected, NDArray& output) { BUILD_DOUBLE_SELECTOR(output.dataType(), indices->dataType(), return segmentIndicesValidate_, (indices, expected, output), NUMERIC_TYPES, INDEXING_TYPES); } // -------------------------------------------------------------------------------------------------------------- // // Unsorted segment ops functors implementation // -------------------------------------------------------------------------------------------------------------- // template <typename I> static __global__ void unsortedSegmentIndexValidateKernel(I* indices, Nd4jLong* indicesShape, I expected, I* found) { __shared__ bool onlyTrue; __shared__ Nd4jLong len; if (threadIdx.x == 0) { onlyTrue = true; len = shape::length(indicesShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = gridDim.x * blockDim.x; for (int e = start; e < len && onlyTrue; e += step) { sd::math::atomics::nd4j_atomicMax(found, indices[e]); if (expected < *found) onlyTrue = false; } } template <typename I> static bool unsortedSegmentIndicesValidate_(sd::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) { output = expected; I found = output; I exp = expected; auto stream = context->getCudaStream(); I* devFound; hipMalloc(&devFound, sizeof(I)); hipMemcpy(devFound, &found, sizeof(I), hipMemcpyHostToDevice); hipLaunchKernelGGL(( unsortedSegmentIndexValidateKernel<I>), dim3(1), dim3(indices->lengthOf()), 128, *stream, reinterpret_cast<I*>(indices->specialBuffer()), indices->specialShapeInfo(), exp, devFound); hipMemcpy(&found, devFound, sizeof(I), hipMemcpyDeviceToHost); hipFree(devFound); output = found; return expected == output; } bool unsortedSegmentIndicesValidate(sd::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) { BUILD_SINGLE_SELECTOR(indices->dataType(), return unsortedSegmentIndicesValidate_, (context, indices, expected, output), INDEXING_TYPES); } 
// -------------------------------------------------------------------------------------------------------------- // // -------------------------------------------------------------------------------------------------------------- // // fill up segments starts and ends - splitted ordered case template <typename I> static __global__ void fillUpSegmentsKernel(void* indices, Nd4jLong* indexShape, int numClasses, int* classesRangesStart, int* classesRangesLenghts) { __shared__ I* idxBuf; __shared__ Nd4jLong idxLen; __shared__ int* result; if (threadIdx.x == 0) { idxBuf = reinterpret_cast<I*>(indices); idxLen = shape::length(indexShape); } __syncthreads(); auto tid = threadIdx.x + blockDim.x * blockIdx.x; auto step = blockDim.x * gridDim.x; for (auto j = tid; j < idxLen; j += step) { auto pos = idxBuf[j]; sd::math::atomics::nd4j_atomicMin<int>(&classesRangesStart[pos], (int)j); sd::math::atomics::nd4j_atomicAdd<int>(&classesRangesLenghts[pos], 1); } } // -------------------------------------------------------------------------------------------------------------- // template <typename I> static void fillUpSegments_(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) { dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); int* begins = reinterpret_cast<int*>(classesRangesBegs.getSpecialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.getSpecialBuffer()); auto stream = classesRangesBegs.getContext()->getCudaStream(); hipLaunchKernelGGL(( fillUpSegmentsKernel<I>), dim3(dims.x), dim3(dims.y), dims.z, *stream , indices->specialBuffer(), indices->specialShapeInfo(), numClasses, begins, lengths); } // -------------------------------------------------------------------------------------------------------------- // void fillUpSegments(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) { BUILD_SINGLE_SELECTOR(indices->dataType(), fillUpSegments_, (indices, numClasses, classesRangesBegs, classesRangesLens), INDEXING_TYPES); } // -------------------------------------------------------------------------------------------------------------- // } } } // -------------------------------------------------------------------------------------------------------------- // // -------------------------------------------------------------------------------------------------------------- //
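// A host-side sketch (illustrative, not part of libnd4j) of what
// unsortedSegmentIndicesValidate_ above boils down to: the kernel atomically
// folds the maximum segment index into `found`, and the caller reports the
// indices as valid when that maximum equals `expected`, returning the value
// actually seen through `output`.
#include <algorithm>
#include <cstdio>
#include <vector>

bool unsorted_indices_valid(std::vector<long long> const& indices, long long expected, long long& output)
{
  long long found = expected;                                     // same seeding as the device path
  for (long long idx : indices) found = std::max(found, idx);     // stands in for nd4j_atomicMax
  output = found;
  return expected == output;
}

int main()
{
  long long out = 0;
  const bool ok = unsorted_indices_valid({0, 2, 1, 3}, 3, out);
  std::printf("valid: %d (max index seen: %lld)\n", ok ? 1 : 0, out);
}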
e374c744afe93a1131832f9fab28c036165a32d5.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <array/NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Sorted segments ops implementations template <typename T, typename I> static bool segmentIndicesValidate_(NDArray* indices, NDArray& aexpected, NDArray& aoutput) { return true; } bool segmentIndicesValidate(sd::LaunchContext* context , NDArray* indices, NDArray& expected, NDArray& output) { BUILD_DOUBLE_SELECTOR(output.dataType(), indices->dataType(), return segmentIndicesValidate_, (indices, expected, output), NUMERIC_TYPES, INDEXING_TYPES); } // -------------------------------------------------------------------------------------------------------------- // // Unsorted segment ops functors implementation // -------------------------------------------------------------------------------------------------------------- // template <typename I> static __global__ void unsortedSegmentIndexValidateKernel(I* indices, Nd4jLong* indicesShape, I expected, I* found) { __shared__ bool onlyTrue; __shared__ Nd4jLong len; if (threadIdx.x == 0) { onlyTrue = true; len = shape::length(indicesShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = gridDim.x * blockDim.x; for (int e = start; e < len && onlyTrue; e += step) { sd::math::atomics::nd4j_atomicMax(found, indices[e]); if (expected < *found) onlyTrue = false; } } template <typename I> static bool unsortedSegmentIndicesValidate_(sd::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) { output = expected; I found = output; I exp = expected; auto stream = context->getCudaStream(); I* devFound; cudaMalloc(&devFound, sizeof(I)); cudaMemcpy(devFound, &found, sizeof(I), cudaMemcpyHostToDevice); unsortedSegmentIndexValidateKernel<I><<<1, indices->lengthOf(), 128, *stream>>>(reinterpret_cast<I*>(indices->specialBuffer()), indices->specialShapeInfo(), exp, devFound); cudaMemcpy(&found, devFound, sizeof(I), cudaMemcpyDeviceToHost); cudaFree(devFound); output = found; return expected == output; } bool unsortedSegmentIndicesValidate(sd::LaunchContext* context , NDArray* indices, Nd4jLong expected, Nd4jLong& output) { BUILD_SINGLE_SELECTOR(indices->dataType(), return unsortedSegmentIndicesValidate_, (context, indices, expected, output), INDEXING_TYPES); } // 
-------------------------------------------------------------------------------------------------------------- // // -------------------------------------------------------------------------------------------------------------- // // fill up segments starts and ends - splitted ordered case template <typename I> static __global__ void fillUpSegmentsKernel(void* indices, Nd4jLong* indexShape, int numClasses, int* classesRangesStart, int* classesRangesLenghts) { __shared__ I* idxBuf; __shared__ Nd4jLong idxLen; __shared__ int* result; if (threadIdx.x == 0) { idxBuf = reinterpret_cast<I*>(indices); idxLen = shape::length(indexShape); } __syncthreads(); auto tid = threadIdx.x + blockDim.x * blockIdx.x; auto step = blockDim.x * gridDim.x; for (auto j = tid; j < idxLen; j += step) { auto pos = idxBuf[j]; sd::math::atomics::nd4j_atomicMin<int>(&classesRangesStart[pos], (int)j); sd::math::atomics::nd4j_atomicAdd<int>(&classesRangesLenghts[pos], 1); } } // -------------------------------------------------------------------------------------------------------------- // template <typename I> static void fillUpSegments_(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) { dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); int* begins = reinterpret_cast<int*>(classesRangesBegs.getSpecialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.getSpecialBuffer()); auto stream = classesRangesBegs.getContext()->getCudaStream(); fillUpSegmentsKernel<I><<<dims.x, dims.y, dims.z, *stream >>>(indices->specialBuffer(), indices->specialShapeInfo(), numClasses, begins, lengths); } // -------------------------------------------------------------------------------------------------------------- // void fillUpSegments(NDArray* indices, Nd4jLong numClasses, NDArray& classesRangesBegs, NDArray& classesRangesLens) { BUILD_SINGLE_SELECTOR(indices->dataType(), fillUpSegments_, (indices, numClasses, classesRangesBegs, classesRangesLens), INDEXING_TYPES); } // -------------------------------------------------------------------------------------------------------------- // } } } // -------------------------------------------------------------------------------------------------------------- // // -------------------------------------------------------------------------------------------------------------- //
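// A host-side sketch (illustrative only) of the bookkeeping fillUpSegmentsKernel
// performs above: for every class id it records the first position at which the
// id appears (the atomicMin into classesRangesStart) and how often it appears
// (the atomicAdd into classesRangesLenghts).
#include <algorithm>
#include <climits>
#include <cstdio>
#include <vector>

int main()
{
  const std::vector<int> indices = {0, 0, 1, 1, 1, 3};   // sorted segment ids
  const int numClasses = 4;
  std::vector<int> starts(numClasses, INT_MAX), lengths(numClasses, 0);
  for (int j = 0; j < static_cast<int>(indices.size()); ++j) {
    const int cls = indices[j];
    starts[cls] = std::min(starts[cls], j);              // stands in for nd4j_atomicMin
    ++lengths[cls];                                      // stands in for nd4j_atomicAdd
  }
  for (int c = 0; c < numClasses; ++c)
    std::printf("class %d: start %d, length %d\n", c, lengths[c] ? starts[c] : -1, lengths[c]);
}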
140749c2c637b96de9c5e75d3cf2b783066fb275.hip
// !!! This is a file automatically generated by hipify!!! #include "triangle.cuh" #include "vec3.cuh" #include "aabb.cuh" __host__ __device__ triangle::triangle(vec3 vertexone, vec3 vertextwo, vec3 vertexthree, //vec3 vn1, vec3 vn2, vec3 vn3, material *mat_ptr) { type = type_triangle; v1 = vertexone; v2 = vertextwo; v3 = vertexthree; //this->vn1 = cross(v3 - v1,v2-v1); this->vn1 = cross(v2 - v1, v3 - v1); this->vn1 /= this->vn1.length(); this->vn2 = this->vn1; this->vn3 = this->vn1; /*this->vn1 = vn1; this->vn2 = vn2; this->vn3 = vn3;*/ center = (v1 + v2 + v3) / 3; normal = cross(v2 - v1, v3 - v1); this->mat_ptr = mat_ptr; } __host__ __device__ bool triangle::bounding_box(float t0, float t1, aabb& box){ box._min[0] = ffmin(ffmin(v1[0], v2[0]), v3[0]); box._max[0] = ffmax(ffmax(v1[0], v2[0]), v3[0]); box._min[1] = ffmin(ffmin(v1[1], v2[1]), v3[1]); box._max[1] = ffmax(ffmax(v1[1], v2[1]), v3[1]); box._min[2] = ffmin(ffmin(v1[2], v2[2]), v3[2]); box._max[2] = ffmax(ffmax(v1[2], v2[2]), v3[2]); return true; } //Reference: https://github.com/tylermorganwall/rayrender/ __device__ bool triangle::hit(const ray& r, float t_min, float t_max, hit_record& rec){ vec3 edge1 = v2 - v1; vec3 edge2 = v3 - v1; vec3 pvec = cross(r.direction(), edge2); float det = dot(pvec, edge1); // no culling if (::fabs(det) < 1E-15) { return(false); } float invdet = 1.0 / det; vec3 tvec = r.origin() - v1; float u = dot(pvec, tvec) * invdet; if (u < 0.0 || u > 1.0) { return(false); } vec3 qvec = cross(tvec, edge1); float v = dot(qvec, r.direction()) * invdet; if (v < 0 || u + v > 1.0) { return(false); } float t = dot(qvec, edge2) * invdet; if (t < t_min || t > t_max) { return(false); } float w = 1 - u - v; rec.t = t; rec.p = r.point_at_parameter(t); //rec.u = u; //rec.v = v; rec.normal = w * vn1 + u * vn2 + v * vn3; rec.mat_ptr = mat_ptr; return(true); }
140749c2c637b96de9c5e75d3cf2b783066fb275.cu
#include "triangle.cuh" #include "vec3.cuh" #include "aabb.cuh" __host__ __device__ triangle::triangle(vec3 vertexone, vec3 vertextwo, vec3 vertexthree, //vec3 vn1, vec3 vn2, vec3 vn3, material *mat_ptr) { type = type_triangle; v1 = vertexone; v2 = vertextwo; v3 = vertexthree; //this->vn1 = cross(v3 - v1,v2-v1); this->vn1 = cross(v2 - v1, v3 - v1); this->vn1 /= this->vn1.length(); this->vn2 = this->vn1; this->vn3 = this->vn1; /*this->vn1 = vn1; this->vn2 = vn2; this->vn3 = vn3;*/ center = (v1 + v2 + v3) / 3; normal = cross(v2 - v1, v3 - v1); this->mat_ptr = mat_ptr; } __host__ __device__ bool triangle::bounding_box(float t0, float t1, aabb& box){ box._min[0] = ffmin(ffmin(v1[0], v2[0]), v3[0]); box._max[0] = ffmax(ffmax(v1[0], v2[0]), v3[0]); box._min[1] = ffmin(ffmin(v1[1], v2[1]), v3[1]); box._max[1] = ffmax(ffmax(v1[1], v2[1]), v3[1]); box._min[2] = ffmin(ffmin(v1[2], v2[2]), v3[2]); box._max[2] = ffmax(ffmax(v1[2], v2[2]), v3[2]); return true; } //Reference: https://github.com/tylermorganwall/rayrender/ __device__ bool triangle::hit(const ray& r, float t_min, float t_max, hit_record& rec){ vec3 edge1 = v2 - v1; vec3 edge2 = v3 - v1; vec3 pvec = cross(r.direction(), edge2); float det = dot(pvec, edge1); // no culling if (std::fabs(det) < 1E-15) { return(false); } float invdet = 1.0 / det; vec3 tvec = r.origin() - v1; float u = dot(pvec, tvec) * invdet; if (u < 0.0 || u > 1.0) { return(false); } vec3 qvec = cross(tvec, edge1); float v = dot(qvec, r.direction()) * invdet; if (v < 0 || u + v > 1.0) { return(false); } float t = dot(qvec, edge2) * invdet; if (t < t_min || t > t_max) { return(false); } float w = 1 - u - v; rec.t = t; rec.p = r.point_at_parameter(t); //rec.u = u; //rec.v = v; rec.normal = w * vn1 + u * vn2 + v * vn3; rec.mat_ptr = mat_ptr; return(true); }
a0b26e40d556e75d10005b99a787321bfbf9a86a.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> __forceinline__ __device__ unsigned warp_id() { // this is not equal to threadIdx.x / 32 unsigned ret; asm volatile ("mov.u32 %0, %warpid;" : "=r"(ret)); return ret; } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index // int bx = blockIdx.x; // int by = blockIdx.y; // Thread index // int tx = threadIdx.x; // int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block // float aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block // float aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A // float aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block // float bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B // float bStep = BLOCK_SIZE * wB; float sum = 00.1; float fsum = 00.2; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix // for (int a = aBegin, b = bBegin; // a <= aEnd; // a += aStep, b += bStep) // { // Declaration of the shared memory array As used to // store the sub-matrix of A // __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix // As[ty][tx] = A[a + wA * ty + tx]; // Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded //__syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix double result; float qqq =0; float x_counter = 0.0; asm(".reg .f32 t1;\n\t"); asm(".reg .f64 t2, t3, t4;\n\t"); #pragma unroll if (0) { for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 1000000) { asm("mul.f32 %0, %3, t1, %2;\n\t" "mul.f64 t2, t3, t4;\n\t" "mul.f32 t1, %0, t1, %3;\n\t" "mul.f32 t1, t1, t1, %2;\n\t" "mul.f64 t2, t3, t4;\n\t" "mul.f32 t1, %0, t1, 
%0;\n\t" "mul.f32 %0, t1, t1, %0;\n\t" "mul.f32 t1, %0, t1, %0;\n\t" "mul.f32 t1, t1, t1, %2;\n\t" "mul.f32 t1, %0, t1, %0;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f32 %0, t1, %0, %3;\n\t" "mul.f32 t1, %0, %3, %0;\n\t" "mul.f32 t1, t1, %2, %0;\n\t" "mul.f32 t1, %0, %0, %3;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f32 %0, t1, %0, t1;\n\t" "mul.f32 t1, t1, %0, %0;\n\t" "mul.f64 %1, t2, t4;\n\t" "mul.f32 %0, t1, %0, t1;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; // } //qqq += k*k; //sum += qqq*qqq/(qqq*2.3); //sum += (a+b+k)*qqq; //Csub += As[ty][k] * Bs[k][tx] + sum; } } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration //__syncthreads(); } if (0) { //if (threadIdx.y % 2 == 0) { //for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 10000000) { asm("mul.f64 t2, %1, t4;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f32 t1, t1, %0;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f32 t1, t1, %0;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f64 %1, t2, t4;\n\t" "mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; } //} } else if (0) { // } else if (warp_id() % 3 == 1) { //for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 10000000) { asm("mul.f64 t2, %1, t4;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f64 t3, t3, t2;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f64 t2, t3, t4;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f64 t3, t3, t2;\n\t" "mul.f64 %1, t3, t2;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; } //} }else { //for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 10000000) { asm("mul.f32 t1, %0, t1;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f32 t1, %0, %0;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f32 t1, t1, %2;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; } //} } // Write the block sub-matrix to device memory; // each thread writes one element //int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //C[c + wB * ty + tx] = Csub; C[0] = qqq+result; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA_int(int *C, int *A, int *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; int sum = 0; int fsum = 0; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a 
<= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded //__syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll int qqq =0; int x_counter = 0; asm(".reg .u32 t1;\n\t"); for (int k = 0; k < BLOCK_SIZE; ++k) { while (x_counter < 1000000) { asm("mul.u32 %0, %1, %2;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, 
%0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, %0, t1;\n\t": "=r"(qqq): "r"(As[ty][k]), "r"(Bs[k][tx]) ); x_counter += 1; } //qqq += k*k; //fsum += qqq*qqq/(qqq*3); //sum += a+b+k; //Csub += As[ty][k] * Bs[k][tx]+sum; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration //__syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element //int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //C[c + wB * ty + tx] = Csub; } void constantInit_int(int *data, int size, int val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { int streamNum = 1; if (checkCmdLineFlag(argc, (const char **)argv, "streams")) { streamNum = getCmdLineArgumentInt(argc, (const char **)argv, "streams"); } // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; unsigned int mem_size_A_double = sizeof(int) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; unsigned int mem_size_B_double = sizeof(int) * size_B; float *h_B = (float *)malloc(mem_size_B); int *h_A_double = (int *)malloc(mem_size_A_double); int *h_B_double = (int *)malloc(mem_size_B_double); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); constantInit_int(h_A_double, size_A, 2); constantInit_int(h_B_double, size_B, 23); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate device memory int *d_A_double, *d_B_double, *d_C_double; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); unsigned int mem_size_C_double = dimsC.x * dimsC.y * sizeof(int); int *h_C_double = (int *) malloc(mem_size_C_double); // allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *) malloc(streamNum * sizeof(hipStream_t)); for (int i = 0; i < streamNum; i++) { checkCudaErrors(hipStreamCreate(&(streams[i]))); } if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to 
device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_A_double, mem_size_A_double); if (error != hipSuccess) { printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B_double, mem_size_B_double); if (error != hipSuccess) { printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C_double, mem_size_C_double); if (error != hipSuccess) { printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A_double, h_A_double, mem_size_A_double, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B_double, h_B_double, mem_size_B_double, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { // matrixMulCUDA<16><<< grid, threads, 0,streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { // matrixMulCUDA<32><<< grid, threads, 0, streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 4; for (int j = 0; j < nIter; j++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads),0, streams[j%streamNum] , d_C, d_A, d_B, dimsA.x, dimsB.x); } else { hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads),0, streams[j%streamNum] , d_C, d_A, d_B, dimsA.x, dimsB.x); } if (block_size == 16) { //matrixMulCUDA_int<16><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x); } else { // matrixMulCUDA_int<32><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x); } } // Record the start event error = hipEventRecord(start, NULL); // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, 
"Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A_double); free(h_B_double); free(h_C_double); hipFree(d_A_double); hipFree(d_B_double); hipFree(d_C_double); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. 
Results may vary when GPU Boost is enabled.\n"); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipSetDevice(devID); } hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } if ((deviceProp.concurrentKernels == 0)) { printf("> GPU does not support concurrent kernel execution\n"); printf(" CUDA kernel runs will be serialized\n"); } printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); int block_size = 32; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
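// [Editor's sketch, not part of the original record] The .hip file above and the .cu file below are
// the same matrixMul program; the visible difference is the kernel-launch syntax that hipify rewrites
// (triple-chevron vs. hipLaunchKernelGGL). A minimal, self-contained illustration of that mapping,
// using a hypothetical scaleKernel:
#include <hip/hip_runtime.h>
#include <stdlib.h>

__global__ void scaleKernel(float *data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 256;
    float *d = NULL;
    if (hipMalloc((void **)&d, n * sizeof(float)) != hipSuccess) return EXIT_FAILURE;
    dim3 grid((n + 127) / 128), block(128);
    // CUDA source form (as written in the .cu file below):
    //   scaleKernel<<<grid, block, 0, 0>>>(d, 2.0f, n);
    // HIP form produced by hipify (as written in the .hip file above):
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d, 2.0f, n);
    hipDeviceSynchronize();
    hipFree(d);
    return EXIT_SUCCESS;
}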
a0b26e40d556e75d10005b99a787321bfbf9a86a.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> __forceinline__ __device__ unsigned warp_id() { // this is not equal to threadIdx.x / 32 unsigned ret; asm volatile ("mov.u32 %0, %warpid;" : "=r"(ret)); return ret; } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index // int bx = blockIdx.x; // int by = blockIdx.y; // Thread index // int tx = threadIdx.x; // int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block // float aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block // float aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A // float aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block // float bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B // float bStep = BLOCK_SIZE * wB; float sum = 00.1; float fsum = 00.2; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix // for (int a = aBegin, b = bBegin; // a <= aEnd; // a += aStep, b += bStep) // { // Declaration of the shared memory array As used to // store the sub-matrix of A // __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix // As[ty][tx] = A[a + wA * ty + tx]; // Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded //__syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix double result; float qqq =0; float x_counter = 0.0; asm(".reg .f32 t1;\n\t"); asm(".reg .f64 t2, t3, t4;\n\t"); #pragma unroll if (0) { for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 1000000) { asm("mul.f32 %0, %3, t1, %2;\n\t" "mul.f64 t2, t3, t4;\n\t" "mul.f32 t1, %0, t1, %3;\n\t" "mul.f32 t1, t1, t1, %2;\n\t" "mul.f64 t2, t3, t4;\n\t" "mul.f32 t1, %0, t1, %0;\n\t" "mul.f32 %0, t1, t1, %0;\n\t" "mul.f32 t1, %0, t1, 
%0;\n\t" "mul.f32 t1, t1, t1, %2;\n\t" "mul.f32 t1, %0, t1, %0;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f32 %0, t1, %0, %3;\n\t" "mul.f32 t1, %0, %3, %0;\n\t" "mul.f32 t1, t1, %2, %0;\n\t" "mul.f32 t1, %0, %0, %3;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f32 %0, t1, %0, t1;\n\t" "mul.f32 t1, t1, %0, %0;\n\t" "mul.f64 %1, t2, t4;\n\t" "mul.f32 %0, t1, %0, t1;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; // } //qqq += k*k; //sum += qqq*qqq/(qqq*2.3); //sum += (a+b+k)*qqq; //Csub += As[ty][k] * Bs[k][tx] + sum; } } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration //__syncthreads(); } if (0) { //if (threadIdx.y % 2 == 0) { //for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 10000000) { asm("mul.f64 t2, %1, t4;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f32 t1, t1, %0;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f32 t1, t1, %0;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f64 %1, t2, t4;\n\t" "mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; } //} } else if (0) { // } else if (warp_id() % 3 == 1) { //for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 10000000) { asm("mul.f64 t2, %1, t4;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f64 t3, t3, t2;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f64 t2, t3, t4;\n\t" "mul.f64 t4, t3, t2;\n\t" "mul.f64 t2, t2, t4;\n\t" "mul.f64 t3, t3, t2;\n\t" "mul.f64 %1, t3, t2;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; } //} }else { //for (int k = 0; k < BLOCK_SIZE; ++k) { //for (float k = 0.1; k < 32.9; k = k+0.99) //{ while (x_counter < 10000000) { asm("mul.f32 t1, %0, t1;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f32 t1, %0, %0;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %0, t1, %0;\n\t" "mul.f32 t1, t1, %2;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %2, t1, %0;\n\t" "mul.f32 %0, t1, %2;\n\t": "=f"(qqq), "=d"(result): "f"(sum), "f"(fsum) ); x_counter += 1.0; } //} } // Write the block sub-matrix to device memory; // each thread writes one element //int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //C[c + wB * ty + tx] = Csub; C[0] = qqq+result; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA_int(int *C, int *A, int *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; int sum = 0; int fsum = 0; // Csub is used to store the element of the block sub-matrix // that is computed by the thread int Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the 
shared memory array As used to // store the sub-matrix of A __shared__ int As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded //__syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll int qqq =0; int x_counter = 0; asm(".reg .u32 t1;\n\t"); for (int k = 0; k < BLOCK_SIZE; ++k) { while (x_counter < 1000000) { asm("mul.u32 %0, %1, %2;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, 
%0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, %0, %1;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 t1, t1, %2;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, t1, %0;\n\t" "mul.u32 t1, %0, %0;\n\t" "mul.u32 %0, %0, t1;\n\t": "=r"(qqq): "r"(As[ty][k]), "r"(Bs[k][tx]) ); x_counter += 1; } //qqq += k*k; //fsum += qqq*qqq/(qqq*3); //sum += a+b+k; //Csub += As[ty][k] * Bs[k][tx]+sum; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration //__syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element //int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //C[c + wB * ty + tx] = Csub; } void constantInit_int(int *data, int size, int val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { int streamNum = 1; if (checkCmdLineFlag(argc, (const char **)argv, "streams")) { streamNum = getCmdLineArgumentInt(argc, (const char **)argv, "streams"); } // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; unsigned int mem_size_A_double = sizeof(int) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; unsigned int mem_size_B_double = sizeof(int) * size_B; float *h_B = (float *)malloc(mem_size_B); int *h_A_double = (int *)malloc(mem_size_A_double); int *h_B_double = (int *)malloc(mem_size_B_double); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); constantInit_int(h_A_double, size_A, 2); constantInit_int(h_B_double, size_B, 23); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate device memory int *d_A_double, *d_B_double, *d_C_double; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); unsigned int mem_size_C_double = dimsC.x * dimsC.y * sizeof(int); int *h_C_double = (int *) malloc(mem_size_C_double); // allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *) malloc(streamNum * sizeof(cudaStream_t)); for (int i = 0; i < streamNum; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, 
mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_A_double, mem_size_A_double); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B_double, mem_size_B_double); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C_double, mem_size_C_double); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A_double, h_A_double, mem_size_A_double, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B_double, h_B_double, mem_size_B_double, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { // matrixMulCUDA<16><<< grid, threads, 0,streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { // matrixMulCUDA<32><<< grid, threads, 0, streams[0] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 4; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16><<< grid, threads,0, streams[j%streamNum] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads,0, streams[j%streamNum] >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } if (block_size == 16) { //matrixMulCUDA_int<16><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x); } else { // matrixMulCUDA_int<32><<< grid, threads,0, streams[(j+1)%streamNum] >>>(d_C_double, d_A_double, d_B_double, dimsA.x, dimsB.x); } } // Record the start event error = cudaEventRecord(start, NULL); // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", 
cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A_double); free(h_B_double); free(h_C_double); cudaFree(d_A_double); cudaFree(d_B_double); cudaFree(d_C_double); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. 
Results may vary when GPU Boost is enabled.\n"); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaSetDevice(devID); } cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } if ((deviceProp.concurrentKernels == 0)) { printf("> GPU does not support concurrent kernel execution\n"); printf(" CUDA kernel runs will be serialized\n"); } printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); int block_size = 32; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
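// [Editor's sketch] The host code in this pair repeats the same printf-and-exit block after every
// cudaMalloc/cudaMemcpy (hipMalloc/hipMemcpy in the .hip twin). The next record below folds that
// pattern into a CUDA_CALL macro; a minimal version of the same idea, assuming exiting on the first
// failure is acceptable and using a hypothetical HIP_CHECK name:
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(call)                                                      \
    do {                                                                     \
        hipError_t err_ = (call);                                            \
        if (err_ != hipSuccess) {                                            \
            printf("%s returned error %s (code %d), line(%d)\n", #call,      \
                   hipGetErrorString(err_), (int)err_, __LINE__);            \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

int main() {
    float *d_A = NULL;
    HIP_CHECK(hipMalloc((void **)&d_A, 1024 * sizeof(float)));
    HIP_CHECK(hipMemset(d_A, 0, 1024 * sizeof(float)));
    HIP_CHECK(hipFree(d_A));
    return EXIT_SUCCESS;
}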
846458073eda25f53009158df4b4e2650685ff06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** Napisati CUDA program koji trazi maksimalni nezavisni skup vrhova u grafu koritenjem paralelnog algoritma koji koristi sluajne brojeve. */ #include<iostream> #include<stdio.h> #include<cstdlib> #include<string> #include<cuda_runtime.h> #include<cuda.h> #include<hiprand/hiprand.h> #include<rocblas.h> #include<fstream> #include<time.h> #include<vector> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include<time.h> #include"pseudo_generator/myrand.h" using namespace std; #define CUDA_CALL(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) /** Generator slucajnih brojeva koji koristi mtrand */ int moj_generator(float* hostData, float *devData, int numElements) { generator_realnih_brojeva(hostData, numElements); CUDA_CALL(hipMemcpy(devData, hostData, numElements * sizeof(float), hipMemcpyHostToDevice)); return EXIT_SUCCESS; } /** * Host funkcija koja provjerava koliko je ostalo * neodabranih vrhova */ bool findZeros(int* polje, int n) { for(int i = 0; i < n; i++) if(polje[i] == 0) return true; return false; } /** * Umnozak pseudo-brojeva i vremena */ __global__ void bestRand(float *devData, int* n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < *n) devData[index] = devData[index]*((int)clock()%10); } /** * Curand host funkcija. Generira pseudo-slucajne brojeve <0,1> * uniformna razdioba */ int create_pseud_numbers(float *hostData, float *devData, int numElements) { size_t n = numElements; hiprandGenerator_t gen; //int *nn; /* Create pseudo-random number generator */ CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); /* Set seed */ CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); /* Generate n floats on device */ CURAND_CALL(hiprandGenerateUniform(gen, devData, n)); /* Copy device memory to host */ CUDA_CALL(hipMemcpy(hostData, devData, n * sizeof(float), hipMemcpyDeviceToHost)); /* Cleanup */ CURAND_CALL(hiprandDestroyGenerator(gen)); return EXIT_SUCCESS; } /** * Device funkcija. Algoritam za pronalazenje maksimalnog nezavisnog skupa vrhova. 
* Ulazni parametri : polje veza, polje pokazivaca na veze za svaki vrh po jedan pointer na polje veze, * hipLaunchKernelGGL(( algoritam), dim3(numElements/128 + 1) ,dim3(128), 0, 0, DindElements, DptrVector, Dizbaceni, devData, Dveze_size, Dptr_size); CUDA_CALL(hipMemcpy(izbaceni, Dizbaceni, numElements * sizeof(int), hipMemcpyDeviceToHost)); polje izbaceni, svaki thread zapise tko je izbacen sa -1 a ako je on trazeni postavi 1 */ __global__ void algoritam(int* veze, int* ptr, int* izbaceni, float *devData, int* veze_size, int* ptr_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; //int c =(int)clock()%(index*100000000); //index = (index + c)%*ptr_size; //izbaceni[index] = c; //provjera indekasa // Ako ti je index u rangu i ako nisi vec izbacen if(index < *ptr_size - 1 && izbaceni[index] != -1) { int provjera = 1; int start = ptr[index]; //pocetak u vezama int end = ptr[index + 1]; // kraj u vezama for(int i = start; i < end; i++) { // Ako je netko dobio vecu tezinu i ako taj nije izbacen kao mogucnost if(devData[index] >= devData[veze[i] - 1] && izbaceni[veze[i] - 1] != -1) provjera = 0; } // Ako je prosao provjeru if(provjera) { izbaceni[index] = 1; // postavi da je index dobar for(int i = start; i< end; i++) izbaceni[veze[i] - 1 ] = -1; // sve susjede izbaci kao mogucnost } } } int main(int argc, const char* argv[]) { // Provjera da li su dobri ulazni parametri if( argc != 3) { cerr<<"Krivi ulazni parametri"<<endl; return EXIT_FAILURE; } /***************** UCITAVANJE PODATAKA *****************************/ int numElements; // broj vrhova vector<int> indElements; // vektor susjedstva ( veze ) vector<int> ptrVector; // pointeri vrhova u vektoru susjedstva ifstream myFile (argv[1]); // Ako je file dobar prepisi ga u vektore if(myFile.is_open()) { int cnt = 0; myFile >> numElements; ptrVector.push_back(cnt); while(myFile.good()) { int v; myFile >> v; if(!myFile.good()) break; if(v == 0) ptrVector.push_back(cnt); else { indElements.push_back(v); cnt++; } } } else { cerr<<"Pogresno ime datoteke"<<endl; return EXIT_FAILURE; } /**************************************************************************/ /* Provjera da li je sve procitano korektno*/ /*cout<<numElements<<endl; for(int i(0); i < ptrVector.size(); i++) cout<<ptrVector[i]<<" "; cout<<endl; for(int i(0); i < indElements.size(); i++) cout<<indElements[i]<<" "; cout<<endl; */ /* Priprema za device*/ /*****************************************************************/ int* HindElements = &indElements[0]; // iz vektora u polje int* HptrVector = &ptrVector[0]; // iz vektora u polje int Hizbaceni[numElements]; // Inicijalno sve na 0 jer su svi vrhovi raspolozivi za koristenje for(int i(0); i < numElements; i++) Hizbaceni[i] = 0; float * hostData, *devData; // polja za pseudo-slucajne brojeve // alokacija za generator pseudo brojeva hostData = (float *)calloc(numElements, sizeof(float)); CUDA_CALL(hipMalloc((void **)&devData, numElements*sizeof(float))); /* Kreiranje slucajnih brojeva sa testiranjem */ /*********************************************************************/ //create_pseud_numbers(hostData, devData, numElements); //moj_generator(hostData, devData, numElements); /* Prikaz rezultata */ /* for( int i = 0; i < numElements; i++) { printf("%1.4f ", hostData[i]); } cout<<endl; */ /*************************************************************************/ // Alokacija memorija za glavni program (algoritam) int Hveze_size = indElements.size(), Hptr_size = ptrVector.size(); // pomocne varijable int *Dveze_size, *Dptr_size; int 
*DindElements, *DptrVector, *Dizbaceni; int izbaceni[numElements]; CUDA_CALL(hipMalloc((void **)&DindElements, indElements.size()*sizeof(int))); CUDA_CALL(hipMalloc((void **)&DptrVector, ptrVector.size()*sizeof(int))); CUDA_CALL(hipMalloc((void **)&Dizbaceni, numElements*sizeof(int))); CUDA_CALL(hipMalloc((void**)&Dveze_size, sizeof(int))); CUDA_CALL(hipMalloc((void**)&Dptr_size, sizeof(int))); CUDA_CALL(hipMemcpy(DindElements, HindElements, indElements.size() * sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(DptrVector, HptrVector, ptrVector.size() * sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(Dizbaceni, &Hizbaceni, numElements * sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(Dveze_size, &Hveze_size, sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(Dptr_size, &Hptr_size, sizeof(int), hipMemcpyHostToDevice)); /**********************************************************************************/ // CUDA grid //dim3 threadsPerBlock(16, 16); //dim3 numBlocks(numElements / threadsPerBlock.x, numElements / threadsPerBlock.y); /***********************************************************************************/ // Algoritam /*--------------------------------------------------------------------------------------------*/ do{ create_pseud_numbers(hostData, devData, numElements); //moj_generator(hostData, devData, numElements); hipLaunchKernelGGL(( algoritam), dim3(numElements/128 + 1) ,dim3(128), 0, 0, DindElements, DptrVector, Dizbaceni, devData, Dveze_size, Dptr_size); CUDA_CALL(hipMemcpy(izbaceni, Dizbaceni, numElements * sizeof(int), hipMemcpyDeviceToHost)); }while(findZeros(izbaceni, numElements)); /*---------------------------------------------------------------------------------------------*/ /************ ISPIS ***************************************/ ofstream myFileOut; char path[80]; strcpy(path,"rezultati/"); strcat(path,argv[2]); cout<<path<<endl; myFileOut.open(path); // ispisi matrice odabranih i izbacenih vrhova 1 -> odabrani, -1 -> izbaceni for( int k = 0; k < numElements; k++) { cout<<k+1<<" : "<<izbaceni[k]<<endl; myFileOut<<k+1<<" : "<<izbaceni[k]<<endl; } /********************************************************/ // Oslobadanje memorije na hostu i divace-u free(hostData); CUDA_CALL(hipFree(devData)); CUDA_CALL(hipFree(DindElements)); CUDA_CALL(hipFree(DptrVector)); CUDA_CALL(hipFree(Dizbaceni)); CUDA_CALL(hipFree(Dveze_size)); CUDA_CALL(hipFree(Dptr_size)); myFile.close(); myFileOut.close(); return 0; }
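// [Editor's sketch] In the record above, the `algoritam` kernel performs one round of a
// random-priority (Luby-style) selection: an undecided vertex joins the independent set only if its
// random weight is strictly smaller than the weight of every neighbour that has not been removed,
// and it then marks all of its neighbours as removed (-1); the host do/while loop repeats rounds
// with fresh hiprand weights until no vertex is still 0. A hypothetical single-threaded reference of
// one round over the same CSR-style arrays (ptr/adj as in ptrVector/indElements, 1-based neighbour
// ids), written with a state copy so the sketch stays deterministic:
#include <vector>

// state: 0 = undecided, 1 = selected, -1 = removed (same encoding as `izbaceni`).
void misRound(const std::vector<int> &ptr, const std::vector<int> &adj,
              const std::vector<float> &weight, std::vector<int> &state) {
    const int n = (int)ptr.size() - 1;
    std::vector<int> next = state;                 // double buffer; the kernel updates in place
    for (int v = 0; v < n; ++v) {
        if (state[v] == -1) continue;              // already removed by a selected neighbour
        bool localMin = true;
        for (int i = ptr[v]; i < ptr[v + 1]; ++i) {
            int u = adj[i] - 1;                    // adjacency entries are 1-based in the input file
            if (state[u] != -1 && weight[v] >= weight[u]) { localMin = false; break; }
        }
        if (localMin) {
            next[v] = 1;                           // v is selected
            for (int i = ptr[v]; i < ptr[v + 1]; ++i)
                next[adj[i] - 1] = -1;             // its neighbours can no longer be selected
        }
    }
    state = next;
}
// Calling misRound with new random weights until no entry of state is 0 mirrors the
// do { create_pseud_numbers(...); algoritam<<<...>>>(...); } while (findZeros(...)) loop above.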
846458073eda25f53009158df4b4e2650685ff06.cu
/** Napisati CUDA program koji trazi maksimalni nezavisni skup vrhova u grafu korištenjem paralelnog algoritma koji koristi slučajne brojeve. */ #include<iostream> #include<stdio.h> #include<cstdlib> #include<string> #include<cuda_runtime.h> #include<cuda.h> #include<curand.h> #include<cublas_v2.h> #include<fstream> #include<time.h> #include<vector> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include<time.h> #include"pseudo_generator/myrand.h" using namespace std; #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) /** Generator slucajnih brojeva koji koristi mtrand */ int moj_generator(float* hostData, float *devData, int numElements) { generator_realnih_brojeva(hostData, numElements); CUDA_CALL(cudaMemcpy(devData, hostData, numElements * sizeof(float), cudaMemcpyHostToDevice)); return EXIT_SUCCESS; } /** * Host funkcija koja provjerava koliko je ostalo * neodabranih vrhova */ bool findZeros(int* polje, int n) { for(int i = 0; i < n; i++) if(polje[i] == 0) return true; return false; } /** * Umnozak pseudo-brojeva i vremena */ __global__ void bestRand(float *devData, int* n) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < *n) devData[index] = devData[index]*((int)clock()%10); } /** * Curand host funkcija. Generira pseudo-slucajne brojeve <0,1> * uniformna razdioba */ int create_pseud_numbers(float *hostData, float *devData, int numElements) { size_t n = numElements; curandGenerator_t gen; //int *nn; /* Create pseudo-random number generator */ CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); /* Set seed */ CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); /* Generate n floats on device */ CURAND_CALL(curandGenerateUniform(gen, devData, n)); /* Copy device memory to host */ CUDA_CALL(cudaMemcpy(hostData, devData, n * sizeof(float), cudaMemcpyDeviceToHost)); /* Cleanup */ CURAND_CALL(curandDestroyGenerator(gen)); return EXIT_SUCCESS; } /** * Device funkcija. Algoritam za pronalazenje maksimalnog nezavisnog skupa vrhova. 
* Ulazni parametri : polje veza, polje pokazivaca na veze za svaki vrh po jedan pointer na polje veze, * algoritam<<<numElements/128 + 1 ,128>>>(DindElements, DptrVector, Dizbaceni, devData, Dveze_size, Dptr_size); CUDA_CALL(cudaMemcpy(izbaceni, Dizbaceni, numElements * sizeof(int), cudaMemcpyDeviceToHost)); polje izbaceni, svaki thread zapise tko je izbacen sa -1 a ako je on trazeni postavi 1 */ __global__ void algoritam(int* veze, int* ptr, int* izbaceni, float *devData, int* veze_size, int* ptr_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; //int c =(int)clock()%(index*100000000); //index = (index + c)%*ptr_size; //izbaceni[index] = c; //provjera indekasa // Ako ti je index u rangu i ako nisi vec izbacen if(index < *ptr_size - 1 && izbaceni[index] != -1) { int provjera = 1; int start = ptr[index]; //pocetak u vezama int end = ptr[index + 1]; // kraj u vezama for(int i = start; i < end; i++) { // Ako je netko dobio vecu tezinu i ako taj nije izbacen kao mogucnost if(devData[index] >= devData[veze[i] - 1] && izbaceni[veze[i] - 1] != -1) provjera = 0; } // Ako je prosao provjeru if(provjera) { izbaceni[index] = 1; // postavi da je index dobar for(int i = start; i< end; i++) izbaceni[veze[i] - 1 ] = -1; // sve susjede izbaci kao mogucnost } } } int main(int argc, const char* argv[]) { // Provjera da li su dobri ulazni parametri if( argc != 3) { cerr<<"Krivi ulazni parametri"<<endl; return EXIT_FAILURE; } /***************** UCITAVANJE PODATAKA *****************************/ int numElements; // broj vrhova vector<int> indElements; // vektor susjedstva ( veze ) vector<int> ptrVector; // pointeri vrhova u vektoru susjedstva ifstream myFile (argv[1]); // Ako je file dobar prepisi ga u vektore if(myFile.is_open()) { int cnt = 0; myFile >> numElements; ptrVector.push_back(cnt); while(myFile.good()) { int v; myFile >> v; if(!myFile.good()) break; if(v == 0) ptrVector.push_back(cnt); else { indElements.push_back(v); cnt++; } } } else { cerr<<"Pogresno ime datoteke"<<endl; return EXIT_FAILURE; } /**************************************************************************/ /* Provjera da li je sve procitano korektno*/ /*cout<<numElements<<endl; for(int i(0); i < ptrVector.size(); i++) cout<<ptrVector[i]<<" "; cout<<endl; for(int i(0); i < indElements.size(); i++) cout<<indElements[i]<<" "; cout<<endl; */ /* Priprema za device*/ /*****************************************************************/ int* HindElements = &indElements[0]; // iz vektora u polje int* HptrVector = &ptrVector[0]; // iz vektora u polje int Hizbaceni[numElements]; // Inicijalno sve na 0 jer su svi vrhovi raspolozivi za koristenje for(int i(0); i < numElements; i++) Hizbaceni[i] = 0; float * hostData, *devData; // polja za pseudo-slucajne brojeve // alokacija za generator pseudo brojeva hostData = (float *)calloc(numElements, sizeof(float)); CUDA_CALL(cudaMalloc((void **)&devData, numElements*sizeof(float))); /* Kreiranje slucajnih brojeva sa testiranjem */ /*********************************************************************/ //create_pseud_numbers(hostData, devData, numElements); //moj_generator(hostData, devData, numElements); /* Prikaz rezultata */ /* for( int i = 0; i < numElements; i++) { printf("%1.4f ", hostData[i]); } cout<<endl; */ /*************************************************************************/ // Alokacija memorija za glavni program (algoritam) int Hveze_size = indElements.size(), Hptr_size = ptrVector.size(); // pomocne varijable int *Dveze_size, *Dptr_size; int *DindElements, *DptrVector, 
*Dizbaceni; int izbaceni[numElements]; CUDA_CALL(cudaMalloc((void **)&DindElements, indElements.size()*sizeof(int))); CUDA_CALL(cudaMalloc((void **)&DptrVector, ptrVector.size()*sizeof(int))); CUDA_CALL(cudaMalloc((void **)&Dizbaceni, numElements*sizeof(int))); CUDA_CALL(cudaMalloc((void**)&Dveze_size, sizeof(int))); CUDA_CALL(cudaMalloc((void**)&Dptr_size, sizeof(int))); CUDA_CALL(cudaMemcpy(DindElements, HindElements, indElements.size() * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(DptrVector, HptrVector, ptrVector.size() * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(Dizbaceni, &Hizbaceni, numElements * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(Dveze_size, &Hveze_size, sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(Dptr_size, &Hptr_size, sizeof(int), cudaMemcpyHostToDevice)); /**********************************************************************************/ // CUDA grid //dim3 threadsPerBlock(16, 16); //dim3 numBlocks(numElements / threadsPerBlock.x, numElements / threadsPerBlock.y); /***********************************************************************************/ // Algoritam /*--------------------------------------------------------------------------------------------*/ do{ create_pseud_numbers(hostData, devData, numElements); //moj_generator(hostData, devData, numElements); algoritam<<<numElements/128 + 1 ,128>>>(DindElements, DptrVector, Dizbaceni, devData, Dveze_size, Dptr_size); CUDA_CALL(cudaMemcpy(izbaceni, Dizbaceni, numElements * sizeof(int), cudaMemcpyDeviceToHost)); }while(findZeros(izbaceni, numElements)); /*---------------------------------------------------------------------------------------------*/ /************ ISPIS ***************************************/ ofstream myFileOut; char path[80]; strcpy(path,"rezultati/"); strcat(path,argv[2]); cout<<path<<endl; myFileOut.open(path); // ispisi matrice odabranih i izbacenih vrhova 1 -> odabrani, -1 -> izbaceni for( int k = 0; k < numElements; k++) { cout<<k+1<<" : "<<izbaceni[k]<<endl; myFileOut<<k+1<<" : "<<izbaceni[k]<<endl; } /********************************************************/ // Oslobadanje memorije na hostu i divace-u free(hostData); CUDA_CALL(cudaFree(devData)); CUDA_CALL(cudaFree(DindElements)); CUDA_CALL(cudaFree(DptrVector)); CUDA_CALL(cudaFree(Dizbaceni)); CUDA_CALL(cudaFree(Dveze_size)); CUDA_CALL(cudaFree(Dptr_size)); myFile.close(); myFileOut.close(); return 0; }
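// [Editor's sketch] The only changes hipify makes to this record's random-number path are 1:1
// renames of the cuRAND host API (curandCreateGenerator -> hiprandCreateGenerator,
// CURAND_RNG_PSEUDO_DEFAULT -> HIPRAND_RNG_PSEUDO_DEFAULT, curandGenerateUniform ->
// hiprandGenerateUniform, and so on). A minimal standalone use of the same call sequence as
// create_pseud_numbers(), HIP side, with status checks omitted for brevity (the file wraps them
// in CURAND_CALL/CUDA_CALL):
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <stdio.h>

int main() {
    const size_t n = 16;
    float hostData[16];
    float *devData = NULL;
    if (hipMalloc((void **)&devData, n * sizeof(float)) != hipSuccess) return 1;

    hiprandGenerator_t gen;
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);   // create pseudo-random generator
    hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);          // fixed seed, as in the file
    hiprandGenerateUniform(gen, devData, n);                    // n uniform floats on the device
    hipMemcpy(hostData, devData, n * sizeof(float), hipMemcpyDeviceToHost);
    hiprandDestroyGenerator(gen);

    for (size_t i = 0; i < n; ++i) printf("%1.4f ", hostData[i]);
    printf("\n");
    hipFree(devData);
    return 0;
}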
95d180a58cba69d2038841d4816997f3acc22255.hip
// !!! This is a file automatically generated by hipify!!! #include "opencv2/gpu/device/common.hpp" #include <opencv2/core/core.hpp> using namespace cv::gpu; #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "../Matting/matting.h" #if HAVE_GPU ==1 #define BLOCK_WIDE 64 #define BLOCK_HIGH 8 #define alpha_top 40 #define alpha_bottom 40 #define alpha_left 120 #define alpha_right 120 texture <float, hipTextureType2D, hipReadModeElementType> source_tex(false, hipFilterModePoint, hipAddressModeClamp); namespace cv { namespace gpu { namespace device { /*__device__ const float motion_TH_f = motion_TH / 255.0; __device__ const float static_SPEED_f = static_SPEED / 255.0; __device__ const float long_SPEED_f = long_SPEED / 255.0; __device__ const float luma_offset_f = luma_offset / 255.0f; __device__ const float u_gain_f = u_gain; __device__ const float v_gain_f = v_gain;*/ __constant__ TuningParaFloat Const; __constant__ HostPara host_para; __global__ void trace_bg_kernel(PtrStepSz<float> motion_diff_rgb_filted0, PtrStepSz<float> motion_diff_rgb_filted1, PtrStepSz<float> motion_diff_rgb_filted2, PtrStepSz<float3> frame_yuv, PtrStepSz<float3> bg_yuv, PtrStepSz<float3> bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body) { extern __shared__ float smem[]; typename float * gray = smem; unsigned int gray_idx = threadIdx.y * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * (blockDim.y-2) + threadIdx.y; unsigned int x = blockIdx.x * (blockDim.x-2) + threadIdx.x; if (y < static_num.rows && x < static_num.cols) { gray[gray_idx] = frame_yuv.ptr(y)[x].x; __syncthreads(); if (threadIdx.x != 0 && threadIdx.y != 0 && threadIdx.x != blockDim.x - 1 && threadIdx.y != blockDim.y - 1 && y + 1<static_num.rows && x + 1<static_num.cols) { float edge_offset = MAX(fabs(gray[gray_idx - blockDim.x - 1] - gray[gray_idx + blockDim.x + 1]), fabs(gray[gray_idx - blockDim.x + 1] - gray[gray_idx + blockDim.x - 1])) / 2; float motion_diff = fabs(motion_diff_rgb_filted0.ptr(y)[x]) + fabs(motion_diff_rgb_filted1.ptr(y)[x]) + fabs(motion_diff_rgb_filted2.ptr(y)[x]); unsigned char static_num_reg = static_num.ptr(y)[x]; if (motion_diff < edge_offset + Const.motion_TH_f) static_num_reg = MIN(static_num_reg + 1, Const.static_MAX); else static_num_reg = 0; static_num.ptr(y)[x] = static_num_reg; float3 bg_yuv_reg = bg_yuv.ptr(y)[x]; if (fabs(bg_yuv_reg.x) <= 0.001f && fabs(bg_yuv_reg.y - 1.0f) <= 0.001f && fabs(bg_yuv_reg.z) <=0.001f) { if (static_num_reg>= Const.init_static_num) bg_yuv.ptr(y)[x] = frame_yuv.ptr(y)[x]; } else { float update_speed; if (is_bg.ptr(y)[x] && static_num_reg >= Const.static_NUM) update_speed = Const.static_SPEED_f; else if (is_body.ptr(y)[x] == 0 && static_num_reg >= Const.long_static_NUM) update_speed = Const.long_SPEED_f; else update_speed = 0; float3 bg_diff_yuv_reg = bg_diff_yuv.ptr(y)[x]; bg_yuv_reg.x = (bg_diff_yuv_reg.x > 0) ? (bg_yuv_reg.x + update_speed) : (bg_yuv_reg.x - update_speed); bg_yuv_reg.y = (bg_diff_yuv_reg.y > 0) ? (bg_yuv_reg.y + update_speed) : (bg_yuv_reg.y - update_speed); bg_yuv_reg.z = (bg_diff_yuv_reg.z > 0) ? 
(bg_yuv_reg.z + update_speed) : (bg_yuv_reg.z - update_speed); bg_yuv.ptr(y)[x] = bg_yuv_reg; } } } } __global__ void update_mask_bg_kernel(PtrStepSz<float> bg_diff_filted0, PtrStepSz<float> bg_diff_filted1, PtrStepSz<float> bg_diff_filted2, PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body) { unsigned int y = blockIdx.y * blockDim.y + threadIdx.y + alpha_top; unsigned int x = blockIdx.x * blockDim.x + threadIdx.x + alpha_left; float bg_diff_abs_y = fabs(bg_diff_filted0.ptr(y)[x]); float bg_diff_abs_u = fabs(bg_diff_filted1.ptr(y)[x]); float bg_diff_abs_v = fabs(bg_diff_filted2.ptr(y)[x]); bg_diff_abs_y = MAX(0.0f, bg_diff_abs_y - Const.luma_offset_f); bg_diff_abs_u = bg_diff_abs_u * Const.u_gain_f; bg_diff_abs_v = bg_diff_abs_v * Const.v_gain_f; float bg_diff_all = (bg_diff_abs_y + bg_diff_abs_u + bg_diff_abs_v)*(fg_sure.ptr(y)[x] + 1); float motion_th = Const.alpha_TH_f; if ((y >= host_para.body_top - 1) && (y <= host_para.body_bottom - 1) && (x >= host_para.body_left - 1) && (x <= host_para.body_right - 1)) { is_body.ptr(y)[x] = 1; motion_th = Const.alpha_TH_f / 2; } else is_body.ptr(y)[x] = 0; if (bg_diff_all > motion_th * 2) { fg_sure.ptr(y)[x] = 1; fg_maybe.ptr(y)[x] = 1; } else { fg_sure.ptr(y)[x] = 0; if (bg_diff_all > motion_th) fg_maybe.ptr(y)[x] = 1; else fg_maybe.ptr(y)[x] = 0; } } __global__ void box_filter_kernel(PtrStepSz<float> filter_out, int ksize, float scale, int block_high, int block_wide, int PPT) { extern __shared__ float smem[]; int y_ldram = (int)blockIdx.y * block_high - ksize; int y_ldram_end = MIN((blockIdx.y + 1) * block_high + ksize, filter_out.rows + ksize); int x_ldram = (int)blockIdx.x * block_wide + threadIdx.x - ksize; int x_ldram_end = MIN((blockIdx.x + 1) * block_wide + ksize, filter_out.cols + ksize); int load_line = blockDim.x / (block_wide / PPT); float * sum_line = smem; float * raw_line = &smem[(2 * ksize + 1) * block_wide]; int raw_line_len = block_wide + ksize * 2 + 1; int x_raw_line = threadIdx.x / load_line; int y_raw_line = threadIdx.x % load_line; float * raw0 = &raw_line[y_raw_line * raw_line_len + x_raw_line*PPT]; int y_wrram = (int)blockIdx.y * block_high - ksize * 2; int y_wrsum = 0; float out = 0; if (threadIdx.x < block_wide) for (int i = 0; i < 2 * ksize + 1; i++) sum_line[i*block_wide + threadIdx.x] = 0; __syncthreads(); while (y_ldram < y_ldram_end) { for (unsigned ll = 0; ll < load_line && y_ldram < y_ldram_end; ll++, y_ldram++) if (x_ldram < x_ldram_end) raw_line[ll*raw_line_len + threadIdx.x] = tex2D(source_tex, x_ldram, y_ldram); __syncthreads(); float s0 = raw0[0]; for (int i = 1; i < ksize * 2 + 1; i++) s0 += raw0[i]; if (PPT == 8) { float s1, s2, s3, s4, s5, s6, s7; s1 = s0 + raw0[ksize * 2 + 1] - raw0[0]; s2 = s1 + raw0[ksize * 2 + 2] - raw0[1]; s3 = s2 + raw0[ksize * 2 + 3] - raw0[2]; s4 = s3 + raw0[ksize * 2 + 4] - raw0[3]; s5 = s4 + raw0[ksize * 2 + 5] - raw0[4]; s6 = s5 + raw0[ksize * 2 + 6] - raw0[5]; s7 = s6 + raw0[ksize * 2 + 7] - raw0[6]; __syncthreads(); raw0[0] = s0; raw0[1] = s1; raw0[2] = s2; raw0[3] = s3; raw0[4] = s4; raw0[5] = s5; raw0[6] = s6; raw0[7] = s7; } else if (PPT == 4) { float s1, s2, s3; s1 = s0 + raw0[ksize * 2 + 1] - raw0[0]; s2 = s1 + raw0[ksize * 2 + 2] - raw0[1]; s3 = s2 + raw0[ksize * 2 + 3] - raw0[2]; __syncthreads(); raw0[0] = s0; raw0[1] = s1; raw0[2] = s2; raw0[3] = s3; } __syncthreads(); if (x_ldram < x_ldram_end - ksize * 2) { int x_wrram = x_ldram + ksize; for (int i = 0; i < load_line && y_wrram < y_ldram_end - ksize; i++, y_wrram++) { out += raw_line[i*raw_line_len + 
threadIdx.x] - sum_line[y_wrsum*block_wide + threadIdx.x]; sum_line[y_wrsum*block_wide + threadIdx.x] = raw_line[i*raw_line_len + threadIdx.x]; y_wrsum = (y_wrsum >= ksize * 2) ? 0 : y_wrsum + 1; if (y_wrram >= (int)blockIdx.y * block_high) filter_out.ptr(y_wrram)[x_wrram] = out *scale; } } __syncthreads(); } } void box_filter_(PtrStepSzb & filter_out, int ksize, float scale, hipStream_t stream) { int block_high = 0, block_wide = 256, PPT = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); if (PPT == 0) { int threadx = block_wide + block_wide / 8 * (1 + (ksize * 2 - 1) / (block_wide / 8)); int smem_size = (2 * ksize + 1)*block_wide*sizeof(float) + threadx / (block_wide / 8) * (block_wide + ksize * 2 + 1) *sizeof(float); if (smem_size + 10 >= deviceProp.sharedMemPerBlock) PPT = 4; else PPT = 8; } if (block_high == 0) { int max = 0, col = divUp(filter_out.cols, block_wide); for (int i = 1; i <= 5; i++) { int e = (col * i) % deviceProp.multiProcessorCount; if (e == 0) e = deviceProp.multiProcessorCount; if (max < e) { max = e; block_high = (filter_out.rows - 1) / i + 1; } } } //printf("row =%d, col=%d, BH=%d, BW=%d, PPT=%d, k=%d\n", filter_out.rows, filter_out.cols, block_high, block_wide, PPT, ksize); CV_Assert(PPT == 4 || PPT == 8); const dim3 block(block_wide + MAX(block_wide / PPT * (1 + (ksize * 2 - 1) / (block_wide / PPT)), 32)); const dim3 grid(divUp(filter_out.cols, block_wide), divUp(filter_out.rows, block_high)); const size_t smemSize = (2 * ksize + 1)*block_wide*sizeof(float) + block.x / (block_wide / PPT) * (block_wide + ksize * 2 + 1) *sizeof(float); CV_Assert(block.x >= 2 * ksize + block_wide && smemSize < deviceProp.sharedMemPerBlock); //printf("thread =%d, Dimx=%d, Dimy=%d, smemSize=%d\n", block.x, grid.x, grid.y, smemSize); box_filter_kernel << <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(filter_out), ksize, scale, block_high, block_wide, PPT); } void trace_bg_(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb motion_diff_rgb_filted2, PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, hipStream_t stream) { const dim3 block(BLOCK_WIDE, BLOCK_HIGH); const dim3 grid(divUp(frame_yuv.cols - 2, BLOCK_WIDE - 2), divUp(frame_yuv.rows - 2, BLOCK_HIGH - 2)); const size_t smemSize = BLOCK_WIDE * BLOCK_HIGH * sizeof(float); trace_bg_kernel<< <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(motion_diff_rgb_filted0), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted1), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted2), static_cast<PtrStepSz<float3>>(frame_yuv), static_cast<PtrStepSz<float3>>(bg_yuv), static_cast<PtrStepSz<float3>>(bg_diff_yuv), static_num, is_bg, is_body); } void update_mask_bg_(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2, PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, hipStream_t stream) { const dim3 block(BLOCK_WIDE, BLOCK_HIGH); const dim3 grid(divUp(fg_sure.cols - alpha_left - alpha_right, BLOCK_WIDE), divUp(fg_sure.rows - alpha_top - alpha_bottom, BLOCK_HIGH)); const size_t smemSize = 0; update_mask_bg_kernel << <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(bg_diff_filted0), static_cast<PtrStepSz<float>>(bg_diff_filted1), static_cast<PtrStepSz<float>>(bg_diff_filted2), fg_sure, fg_maybe, is_body); } } } } void trace_bg(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb 
motion_diff_rgb_filted2, PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, hipStream_t stream) { CV_Assert(motion_diff_rgb_filted0.cols==is_bg.cols && frame_yuv.cols==is_bg.cols && bg_yuv.cols==is_bg.cols && bg_diff_yuv.cols==is_bg.cols && static_num.cols==is_bg.cols && is_body.cols==is_bg.cols); CV_Assert(motion_diff_rgb_filted0.rows==is_bg.rows && frame_yuv.rows==is_bg.rows && bg_yuv.rows==is_bg.rows && bg_diff_yuv.rows==is_bg.rows && static_num.rows==is_bg.rows && is_body.rows==is_bg.rows); device::trace_bg_(motion_diff_rgb_filted0, motion_diff_rgb_filted1, motion_diff_rgb_filted2, frame_yuv, bg_yuv, bg_diff_yuv, static_num, is_bg, is_body, stream); } void update_mask_bg(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2, PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, hipStream_t stream) { CV_Assert(bg_diff_filted0.cols==is_body.cols && bg_diff_filted1.cols==is_body.cols && bg_diff_filted2.cols==is_body.cols && fg_sure.cols==is_body.cols && fg_maybe.cols==is_body.cols); CV_Assert(bg_diff_filted0.rows==is_body.rows && bg_diff_filted1.rows==is_body.rows && bg_diff_filted2.rows==is_body.rows && fg_sure.rows==is_body.rows && fg_maybe.rows==is_body.rows); device::update_mask_bg_(bg_diff_filted0, bg_diff_filted1, bg_diff_filted2, fg_sure, fg_maybe, is_body, stream); } void box_filter_gpu(PtrStepSzb raw_in, PtrStepSzb filter_out, int ksize, float scale = -1, hipStream_t stream = NULL) { CV_Assert(raw_in.cols == filter_out.cols && raw_in.rows == filter_out.rows); if (scale == -1) scale = 1.0f / ((ksize * 2 + 1) *(ksize * 2 + 1)); cv::gpu::device::bindTexture(&source_tex, static_cast<PtrStepSzf> (raw_in)); cv::gpu::device::box_filter_(filter_out, ksize, scale, stream); } void tune_gpu_parameter(TuningParaFloat *c) { checkCudaErrors(hipMemcpyToSymbol(device::Const, c, sizeof(TuningParaFloat))); } void update_host_para(HostPara *p) { checkCudaErrors(hipMemcpyToSymbol(device::host_para, p, sizeof(HostPara))); } #endif
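// A minimal sketch of the constant-memory parameter update pattern used by
// tune_gpu_parameter()/update_host_para() above, written against the plain CUDA
// runtime API (the .hip version of the same call is hipMemcpyToSymbol). The
// Params struct and its field names are hypothetical, not the real TuningParaFloat.
#include <cuda_runtime.h>
#include <cstdio>

struct Params { float motion_th; float alpha_th; };
__constant__ Params d_params;                    // lives in constant memory

__global__ void use_params(float *out) {
    out[0] = d_params.motion_th + d_params.alpha_th;
}

int main() {
    Params h = {0.1f, 0.2f};
    // Copy the host struct into the __constant__ symbol before any launch that reads it.
    cudaMemcpyToSymbol(d_params, &h, sizeof(Params));
    float *d_out = nullptr, h_out = 0.f;
    cudaMalloc(&d_out, sizeof(float));
    use_params<<<1, 1>>>(d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", h_out);                       // expected ~0.3
    cudaFree(d_out);
    return 0;
}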
95d180a58cba69d2038841d4816997f3acc22255.cu
#include "opencv2/gpu/device/common.hpp" #include <opencv2/core/core.hpp> using namespace cv::gpu; #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "../Matting/matting.h" #if HAVE_GPU ==1 #define BLOCK_WIDE 64 #define BLOCK_HIGH 8 #define alpha_top 40 #define alpha_bottom 40 #define alpha_left 120 #define alpha_right 120 texture <float, cudaTextureType2D, cudaReadModeElementType> source_tex(false, cudaFilterModePoint, cudaAddressModeClamp); namespace cv { namespace gpu { namespace device { /*__device__ const float motion_TH_f = motion_TH / 255.0; __device__ const float static_SPEED_f = static_SPEED / 255.0; __device__ const float long_SPEED_f = long_SPEED / 255.0; __device__ const float luma_offset_f = luma_offset / 255.0f; __device__ const float u_gain_f = u_gain; __device__ const float v_gain_f = v_gain;*/ __constant__ TuningParaFloat Const; __constant__ HostPara host_para; __global__ void trace_bg_kernel(PtrStepSz<float> motion_diff_rgb_filted0, PtrStepSz<float> motion_diff_rgb_filted1, PtrStepSz<float> motion_diff_rgb_filted2, PtrStepSz<float3> frame_yuv, PtrStepSz<float3> bg_yuv, PtrStepSz<float3> bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body) { extern __shared__ float smem[]; typename float * gray = smem; unsigned int gray_idx = threadIdx.y * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * (blockDim.y-2) + threadIdx.y; unsigned int x = blockIdx.x * (blockDim.x-2) + threadIdx.x; if (y < static_num.rows && x < static_num.cols) { gray[gray_idx] = frame_yuv.ptr(y)[x].x; __syncthreads(); if (threadIdx.x != 0 && threadIdx.y != 0 && threadIdx.x != blockDim.x - 1 && threadIdx.y != blockDim.y - 1 && y + 1<static_num.rows && x + 1<static_num.cols) { float edge_offset = MAX(fabs(gray[gray_idx - blockDim.x - 1] - gray[gray_idx + blockDim.x + 1]), fabs(gray[gray_idx - blockDim.x + 1] - gray[gray_idx + blockDim.x - 1])) / 2; float motion_diff = fabs(motion_diff_rgb_filted0.ptr(y)[x]) + fabs(motion_diff_rgb_filted1.ptr(y)[x]) + fabs(motion_diff_rgb_filted2.ptr(y)[x]); unsigned char static_num_reg = static_num.ptr(y)[x]; if (motion_diff < edge_offset + Const.motion_TH_f) static_num_reg = MIN(static_num_reg + 1, Const.static_MAX); else static_num_reg = 0; static_num.ptr(y)[x] = static_num_reg; float3 bg_yuv_reg = bg_yuv.ptr(y)[x]; if (fabs(bg_yuv_reg.x) <= 0.001f && fabs(bg_yuv_reg.y - 1.0f) <= 0.001f && fabs(bg_yuv_reg.z) <=0.001f) { if (static_num_reg>= Const.init_static_num) bg_yuv.ptr(y)[x] = frame_yuv.ptr(y)[x]; } else { float update_speed; if (is_bg.ptr(y)[x] && static_num_reg >= Const.static_NUM) update_speed = Const.static_SPEED_f; else if (is_body.ptr(y)[x] == 0 && static_num_reg >= Const.long_static_NUM) update_speed = Const.long_SPEED_f; else update_speed = 0; float3 bg_diff_yuv_reg = bg_diff_yuv.ptr(y)[x]; bg_yuv_reg.x = (bg_diff_yuv_reg.x > 0) ? (bg_yuv_reg.x + update_speed) : (bg_yuv_reg.x - update_speed); bg_yuv_reg.y = (bg_diff_yuv_reg.y > 0) ? (bg_yuv_reg.y + update_speed) : (bg_yuv_reg.y - update_speed); bg_yuv_reg.z = (bg_diff_yuv_reg.z > 0) ? 
(bg_yuv_reg.z + update_speed) : (bg_yuv_reg.z - update_speed); bg_yuv.ptr(y)[x] = bg_yuv_reg; } } } } __global__ void update_mask_bg_kernel(PtrStepSz<float> bg_diff_filted0, PtrStepSz<float> bg_diff_filted1, PtrStepSz<float> bg_diff_filted2, PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body) { unsigned int y = blockIdx.y * blockDim.y + threadIdx.y + alpha_top; unsigned int x = blockIdx.x * blockDim.x + threadIdx.x + alpha_left; float bg_diff_abs_y = fabs(bg_diff_filted0.ptr(y)[x]); float bg_diff_abs_u = fabs(bg_diff_filted1.ptr(y)[x]); float bg_diff_abs_v = fabs(bg_diff_filted2.ptr(y)[x]); bg_diff_abs_y = MAX(0.0f, bg_diff_abs_y - Const.luma_offset_f); bg_diff_abs_u = bg_diff_abs_u * Const.u_gain_f; bg_diff_abs_v = bg_diff_abs_v * Const.v_gain_f; float bg_diff_all = (bg_diff_abs_y + bg_diff_abs_u + bg_diff_abs_v)*(fg_sure.ptr(y)[x] + 1); float motion_th = Const.alpha_TH_f; if ((y >= host_para.body_top - 1) && (y <= host_para.body_bottom - 1) && (x >= host_para.body_left - 1) && (x <= host_para.body_right - 1)) { is_body.ptr(y)[x] = 1; motion_th = Const.alpha_TH_f / 2; } else is_body.ptr(y)[x] = 0; if (bg_diff_all > motion_th * 2) { fg_sure.ptr(y)[x] = 1; fg_maybe.ptr(y)[x] = 1; } else { fg_sure.ptr(y)[x] = 0; if (bg_diff_all > motion_th) fg_maybe.ptr(y)[x] = 1; else fg_maybe.ptr(y)[x] = 0; } } __global__ void box_filter_kernel(PtrStepSz<float> filter_out, int ksize, float scale, int block_high, int block_wide, int PPT) { extern __shared__ float smem[]; int y_ldram = (int)blockIdx.y * block_high - ksize; int y_ldram_end = MIN((blockIdx.y + 1) * block_high + ksize, filter_out.rows + ksize); int x_ldram = (int)blockIdx.x * block_wide + threadIdx.x - ksize; int x_ldram_end = MIN((blockIdx.x + 1) * block_wide + ksize, filter_out.cols + ksize); int load_line = blockDim.x / (block_wide / PPT); float * sum_line = smem; float * raw_line = &smem[(2 * ksize + 1) * block_wide]; int raw_line_len = block_wide + ksize * 2 + 1; int x_raw_line = threadIdx.x / load_line; int y_raw_line = threadIdx.x % load_line; float * raw0 = &raw_line[y_raw_line * raw_line_len + x_raw_line*PPT]; int y_wrram = (int)blockIdx.y * block_high - ksize * 2; int y_wrsum = 0; float out = 0; if (threadIdx.x < block_wide) for (int i = 0; i < 2 * ksize + 1; i++) sum_line[i*block_wide + threadIdx.x] = 0; __syncthreads(); while (y_ldram < y_ldram_end) { for (unsigned ll = 0; ll < load_line && y_ldram < y_ldram_end; ll++, y_ldram++) if (x_ldram < x_ldram_end) raw_line[ll*raw_line_len + threadIdx.x] = tex2D(source_tex, x_ldram, y_ldram); __syncthreads(); float s0 = raw0[0]; for (int i = 1; i < ksize * 2 + 1; i++) s0 += raw0[i]; if (PPT == 8) { float s1, s2, s3, s4, s5, s6, s7; s1 = s0 + raw0[ksize * 2 + 1] - raw0[0]; s2 = s1 + raw0[ksize * 2 + 2] - raw0[1]; s3 = s2 + raw0[ksize * 2 + 3] - raw0[2]; s4 = s3 + raw0[ksize * 2 + 4] - raw0[3]; s5 = s4 + raw0[ksize * 2 + 5] - raw0[4]; s6 = s5 + raw0[ksize * 2 + 6] - raw0[5]; s7 = s6 + raw0[ksize * 2 + 7] - raw0[6]; __syncthreads(); raw0[0] = s0; raw0[1] = s1; raw0[2] = s2; raw0[3] = s3; raw0[4] = s4; raw0[5] = s5; raw0[6] = s6; raw0[7] = s7; } else if (PPT == 4) { float s1, s2, s3; s1 = s0 + raw0[ksize * 2 + 1] - raw0[0]; s2 = s1 + raw0[ksize * 2 + 2] - raw0[1]; s3 = s2 + raw0[ksize * 2 + 3] - raw0[2]; __syncthreads(); raw0[0] = s0; raw0[1] = s1; raw0[2] = s2; raw0[3] = s3; } __syncthreads(); if (x_ldram < x_ldram_end - ksize * 2) { int x_wrram = x_ldram + ksize; for (int i = 0; i < load_line && y_wrram < y_ldram_end - ksize; i++, y_wrram++) { out += raw_line[i*raw_line_len + 
threadIdx.x] - sum_line[y_wrsum*block_wide + threadIdx.x]; sum_line[y_wrsum*block_wide + threadIdx.x] = raw_line[i*raw_line_len + threadIdx.x]; y_wrsum = (y_wrsum >= ksize * 2) ? 0 : y_wrsum + 1; if (y_wrram >= (int)blockIdx.y * block_high) filter_out.ptr(y_wrram)[x_wrram] = out *scale; } } __syncthreads(); } } void box_filter_(PtrStepSzb & filter_out, int ksize, float scale, cudaStream_t stream) { int block_high = 0, block_wide = 256, PPT = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); if (PPT == 0) { int threadx = block_wide + block_wide / 8 * (1 + (ksize * 2 - 1) / (block_wide / 8)); int smem_size = (2 * ksize + 1)*block_wide*sizeof(float) + threadx / (block_wide / 8) * (block_wide + ksize * 2 + 1) *sizeof(float); if (smem_size + 10 >= deviceProp.sharedMemPerBlock) PPT = 4; else PPT = 8; } if (block_high == 0) { int max = 0, col = divUp(filter_out.cols, block_wide); for (int i = 1; i <= 5; i++) { int e = (col * i) % deviceProp.multiProcessorCount; if (e == 0) e = deviceProp.multiProcessorCount; if (max < e) { max = e; block_high = (filter_out.rows - 1) / i + 1; } } } //printf("row =%d, col=%d, BH=%d, BW=%d, PPT=%d, k=%d\n", filter_out.rows, filter_out.cols, block_high, block_wide, PPT, ksize); CV_Assert(PPT == 4 || PPT == 8); const dim3 block(block_wide + MAX(block_wide / PPT * (1 + (ksize * 2 - 1) / (block_wide / PPT)), 32)); const dim3 grid(divUp(filter_out.cols, block_wide), divUp(filter_out.rows, block_high)); const size_t smemSize = (2 * ksize + 1)*block_wide*sizeof(float) + block.x / (block_wide / PPT) * (block_wide + ksize * 2 + 1) *sizeof(float); CV_Assert(block.x >= 2 * ksize + block_wide && smemSize < deviceProp.sharedMemPerBlock); //printf("thread =%d, Dimx=%d, Dimy=%d, smemSize=%d\n", block.x, grid.x, grid.y, smemSize); box_filter_kernel << <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(filter_out), ksize, scale, block_high, block_wide, PPT); } void trace_bg_(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb motion_diff_rgb_filted2, PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, cudaStream_t stream) { const dim3 block(BLOCK_WIDE, BLOCK_HIGH); const dim3 grid(divUp(frame_yuv.cols - 2, BLOCK_WIDE - 2), divUp(frame_yuv.rows - 2, BLOCK_HIGH - 2)); const size_t smemSize = BLOCK_WIDE * BLOCK_HIGH * sizeof(float); trace_bg_kernel<< <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(motion_diff_rgb_filted0), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted1), static_cast<PtrStepSz<float>>(motion_diff_rgb_filted2), static_cast<PtrStepSz<float3>>(frame_yuv), static_cast<PtrStepSz<float3>>(bg_yuv), static_cast<PtrStepSz<float3>>(bg_diff_yuv), static_num, is_bg, is_body); } void update_mask_bg_(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2, PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, cudaStream_t stream) { const dim3 block(BLOCK_WIDE, BLOCK_HIGH); const dim3 grid(divUp(fg_sure.cols - alpha_left - alpha_right, BLOCK_WIDE), divUp(fg_sure.rows - alpha_top - alpha_bottom, BLOCK_HIGH)); const size_t smemSize = 0; update_mask_bg_kernel << <grid, block, smemSize, stream >> > (static_cast<PtrStepSz<float>>(bg_diff_filted0), static_cast<PtrStepSz<float>>(bg_diff_filted1), static_cast<PtrStepSz<float>>(bg_diff_filted2), fg_sure, fg_maybe, is_body); } } } } void trace_bg(PtrStepSzb motion_diff_rgb_filted0, PtrStepSzb motion_diff_rgb_filted1, PtrStepSzb 
motion_diff_rgb_filted2, PtrStepSzb frame_yuv, PtrStepSzb bg_yuv, PtrStepSzb bg_diff_yuv, PtrStepSzb static_num, PtrStepSzb is_bg, PtrStepSzb is_body, cudaStream_t stream) { CV_Assert(motion_diff_rgb_filted0.cols==is_bg.cols && frame_yuv.cols==is_bg.cols && bg_yuv.cols==is_bg.cols && bg_diff_yuv.cols==is_bg.cols && static_num.cols==is_bg.cols && is_body.cols==is_bg.cols); CV_Assert(motion_diff_rgb_filted0.rows==is_bg.rows && frame_yuv.rows==is_bg.rows && bg_yuv.rows==is_bg.rows && bg_diff_yuv.rows==is_bg.rows && static_num.rows==is_bg.rows && is_body.rows==is_bg.rows); device::trace_bg_(motion_diff_rgb_filted0, motion_diff_rgb_filted1, motion_diff_rgb_filted2, frame_yuv, bg_yuv, bg_diff_yuv, static_num, is_bg, is_body, stream); } void update_mask_bg(PtrStepSzb bg_diff_filted0, PtrStepSzb bg_diff_filted1, PtrStepSzb bg_diff_filted2, PtrStepSzb fg_sure, PtrStepSzb fg_maybe, PtrStepSzb is_body, cudaStream_t stream) { CV_Assert(bg_diff_filted0.cols==is_body.cols && bg_diff_filted1.cols==is_body.cols && bg_diff_filted2.cols==is_body.cols && fg_sure.cols==is_body.cols && fg_maybe.cols==is_body.cols); CV_Assert(bg_diff_filted0.rows==is_body.rows && bg_diff_filted1.rows==is_body.rows && bg_diff_filted2.rows==is_body.rows && fg_sure.rows==is_body.rows && fg_maybe.rows==is_body.rows); device::update_mask_bg_(bg_diff_filted0, bg_diff_filted1, bg_diff_filted2, fg_sure, fg_maybe, is_body, stream); } void box_filter_gpu(PtrStepSzb raw_in, PtrStepSzb filter_out, int ksize, float scale = -1, cudaStream_t stream = NULL) { CV_Assert(raw_in.cols == filter_out.cols && raw_in.rows == filter_out.rows); if (scale == -1) scale = 1.0f / ((ksize * 2 + 1) *(ksize * 2 + 1)); cv::gpu::device::bindTexture(&source_tex, static_cast<PtrStepSzf> (raw_in)); cv::gpu::device::box_filter_(filter_out, ksize, scale, stream); } void tune_gpu_parameter(TuningParaFloat *c) { checkCudaErrors(cudaMemcpyToSymbol(device::Const, c, sizeof(TuningParaFloat))); } void update_host_para(HostPara *p) { checkCudaErrors(cudaMemcpyToSymbol(device::host_para, p, sizeof(HostPara))); } #endif
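// A minimal sketch of the overlapping-tile ("halo") launch that trace_bg_ uses
// above: blocks of BLOCK threads advance by BLOCK-2 outputs, every thread stages
// one input element into shared memory, and only interior threads of each block
// write a result. Shown here for a 1-D 3-point average; the kernel above does
// the 2-D analogue with BLOCK_WIDE x BLOCK_HIGH tiles.
#include <cuda_runtime.h>
#include <cstdio>

#define BLOCK 64

__global__ void avg3(const float *in, float *out, int n) {
    __shared__ float tile[BLOCK];
    int i = blockIdx.x * (BLOCK - 2) + threadIdx.x;      // overlapping global index
    if (i < n) tile[threadIdx.x] = in[i];
    __syncthreads();
    if (threadIdx.x > 0 && threadIdx.x < BLOCK - 1 && i + 1 < n)
        out[i] = (tile[threadIdx.x - 1] + tile[threadIdx.x] + tile[threadIdx.x + 1]) / 3.0f;
}

int main() {
    const int n = 1 << 16;
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_in, 0, n * sizeof(float));
    int grid = (n - 2 + BLOCK - 3) / (BLOCK - 2);        // divUp(n - 2, BLOCK - 2)
    avg3<<<grid, BLOCK>>>(d_in, d_out, n);
    cudaDeviceSynchronize();
    printf("%s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}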
6c2963a234d3fd629db26157f9492821c26d859b.hip
// !!! This is a file automatically generated by hipify!!! /* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #define CHUNK_K 1 #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. #define SKEW 0 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void apmm_w1a2(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) { // GEMM configuration. int K_TILES = K_GLOBAL / 128; int W_bit_offset = M_GLOBAL*K_GLOBAL/128; int X_bit_offset = N_GLOBAL*K_GLOBAL/128; int ROW_BIT = K_GLOBAL/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j)); // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int b=0; b<xb; b++) { // for(int i=0; i<N_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j)); // } // } // } // } for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = block_pos / (N_GLOBAL/32) * 64; const unsigned int block_tile_j = block_pos % (N_GLOBAL/32) * 32; // Stop when there are no more D matrix tiles to compute in this CTA. 
if (block_tile_i >= M_GLOBAL) { break; } typedef union { int4 vec; int a[4]; } U4; wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const int4 *warp_ptr; if (warpId < 2) { warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 32 * ROW_BIT; } else if (warpId == 2) { warp_ptr = &X[block_tile_j * ROW_BIT]; } else if (warpId == 3) { warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset]; } // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId*32 + laneId; // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. // int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) + // (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) + // (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit. int4 *lane_ptr = (int4*)(warp_ptr + laneId*ROW_BIT + tile_k); // (K/128), since K=128 in bit. int4 is 128 bit. if (warpId < 4) { // Copy 16 bytes at once in each lane. shmem[shmem_idx][0] = *lane_ptr; } // U4 tmp_probe; // tmp_probe.vec = *lane_ptr; // printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]); __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<128; i++) { // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int t=0; t<a[i].num_elements; t++) { // printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]); // } // printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step); // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); } // This pointer is used to access the C and D matrix tiles this warp computes. 
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 64 * 16 + (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8; wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT); } } __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=62; i<64; i++) { // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j)); // } // } // } // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. // int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO. size_t idx = warpId * 8 * 64 + (laneId%8) * 4 + (laneId/8)*2*64; int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx; U4 tmp0; U4 tmp1; U4 val[2]; #pragma unroll for (int i = 0; i < 2; i++) { tmp0.vec = *((int4*)shmem_warp_stream_ptr); tmp1.vec = *((int4*)shmem_warp_stream_ptr+8); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i = 0; i < 4; i++) { // printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]); // } // printf("\n"); // for(int i = 0; i < 4; i++) { // printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]); // } // printf("\n"); // } val[i].a[0] = tmp0.a[0] + 2*tmp1.a[0]; val[i].a[1] = tmp0.a[1] + 2*tmp1.a[1]; val[i].a[2] = tmp0.a[2] + 2*tmp1.a[2]; val[i].a[3] = tmp0.a[3] + 2*tmp1.a[3]; // if (warpId == 7) { // printf("warpId: %d, laneId: %d, idx: %d, val[%d].a: %d, %d, %d, %d, tmp0: %d %d %d %d, tmp1: %d %d %d %d \n", warpId, laneId, idx, i, val[i].a[0], val[i].a[1], val[i].a[2], val[i].a[3], tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3], tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3] ); // } shmem_warp_stream_ptr += 64; } __syncthreads(); // This warp's pointer to the C matrix data to copy memory from to shared memory. // TODO: May be moved outside the for loop. size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + warpId*8*N_GLOBAL + (laneId%8)*4 + (laneId/8)*2*N_GLOBAL; // printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx); // Now that shared memory contains all the D tiles, stream them to global memory. 
int *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < 2; i++) { *((int4 *)(dst_gmem_warp_stream_ptr + i*N_GLOBAL)) = val[i].vec; } __syncthreads(); } } void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){ int *W_int = (int*) W; int *X_int = (int*) X; for(int b=0; b<W_BIT; b++) { for(int i = 0; i < M_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i; W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } for(int b = 0; b<X_BIT; b++) { for(int i = 0; i < N_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j; X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } } int popcnt(int i) { // Java: use int, and use >>> instead of >> // C or C++: use int i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) { int *W_int = (int*) W; int *X_int = (int*) X; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } ref_C[m*N_GLOBAL+n]= tmp; } } } void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. int *W_int = (int*) W; int *X_int = (int*) X; int C_ref_before_decompose[M_GLOBAL*N_GLOBAL]; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } C_ref_before_decompose[m*K_GLOBAL+n]= tmp; } } for(int m=0; m<M_GLOBAL; m++) { for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) { int val[OUT_BIT]; for(int b=0; b<OUT_BIT; b++) val[b] = 0; for(int n=0; n<32; n++) { int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n]; tmp = (tmp - 128); // Can be modified for other quantized parameters. 
for(int b=0; b<OUT_BIT; b++) { int mask = 1; val[b] = val[b] << 1; val[b] = val[b] | ((mask<<b) & tmp); } } for(int b=0; b<OUT_BIT; b++) { ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b]; } } } } void validate_results(int *C, int* ref_C, int M_, int N_) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < M_; i++) { for(int j = 0; j < N_; j++) { int idx = i*N_+j; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result with pack for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int m = 0; m < M_; m++) { for(int n_tile = 0; n_tile < N_/32; n_tile++) { for(int b=0; b<OUT_BIT; b++) { int idx = b*M_*N_/32 + m*N_/32+n_tile; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } #define verify_output int main(int argc, char **argv) { int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); int X_BIT = 2; int W_BIT = 1; for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) { // int M_GLOBAL = 256; int N_GLOBAL = M_GLOBAL; int K_GLOBAL = M_GLOBAL; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT)); checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL)); #ifdef verify_output int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT); X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT); Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); printf("Preparing validation data for GPU...\n"); init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, hipMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(hipFuncSetAttribute( apmm_w1a2, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. 
float bmma_ms_avg = 0.0f; int NUM_PROFILES = 200; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; hipEvent_t bmma_start; hipEvent_t bmma_end; hipEventCreate(&bmma_start); hipEventCreate(&bmma_end); hipEventRecord(bmma_start); checkKernelErrors( hipLaunchKernelGGL(( (apmm_w1a2), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT))); hipEventRecord(bmma_end); hipEventSynchronize(bmma_end); hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end); hipEventDestroy(bmma_start); hipEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES; printf("V30, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12); #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost)); int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); /* Copmpute reference matrix on CPU */ compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); /* validation results */ validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL); free(W_h); free(X_h); free(Output_h); free(Output_ref); #endif checkCudaErrors(hipFree(reinterpret_cast<void *>(W))); checkCudaErrors(hipFree(reinterpret_cast<void *>(X))); checkCudaErrors(hipFree(reinterpret_cast<void *>(Output))); } return EXIT_SUCCESS; }
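// A minimal host-side sketch of the arithmetic the kernel above realizes with
// bmma_sync(..., bmmaBitOpAND) plus the "tmp0 + 2*tmp1" accumulation: each
// (weight bit-plane, activation bit-plane) pair contributes
// 2^wb * 2^xb * popcount(w_plane & x_plane) to the int32 result, which is what
// compute_ref() accumulates one bit at a time. The 32-bit values below are arbitrary.
#include <cstdio>

static int dot_1bit(unsigned w, unsigned x) {
    return __builtin_popcount(w & x);            // 1-bit dot product over 32 lanes
}

int main() {
    unsigned w0 = 0xF0F0F0F0u;                   // single weight bit-plane (w1)
    unsigned x0 = 0x12345678u;                   // activation bit-plane 0, weight 2^0
    unsigned x1 = 0x9ABCDEF0u;                   // activation bit-plane 1, weight 2^1
    int fast = dot_1bit(w0, x0) + 2 * dot_1bit(w0, x1);
    int ref = 0;                                 // bit-by-bit, as in compute_ref()
    for (int k = 0; k < 32; ++k) {
        int wv = (w0 >> k) & 1, x0v = (x0 >> k) & 1, x1v = (x1 >> k) & 1;
        ref += wv * x0v + 2 * wv * x1v;
    }
    printf("fast=%d ref=%d\n", fast, ref);       // both print the same value
    return 0;
}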
6c2963a234d3fd629db26157f9492821c26d859b.cu
/* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #define CHUNK_K 1 #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. #define SKEW 0 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void apmm_w1a2(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) { // GEMM configuration. int K_TILES = K_GLOBAL / 128; int W_bit_offset = M_GLOBAL*K_GLOBAL/128; int X_bit_offset = N_GLOBAL*K_GLOBAL/128; int ROW_BIT = K_GLOBAL/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j)); // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int b=0; b<xb; b++) { // for(int i=0; i<N_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j)); // } // } // } // } for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = block_pos / (N_GLOBAL/32) * 64; const unsigned int block_tile_j = block_pos % (N_GLOBAL/32) * 32; // Stop when there are no more D matrix tiles to compute in this CTA. 
if (block_tile_i >= M_GLOBAL) { break; } typedef union { int4 vec; int a[4]; } U4; wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const int4 *warp_ptr; if (warpId < 2) { warp_ptr = &W[block_tile_i * ROW_BIT] + warpId * 32 * ROW_BIT; } else if (warpId == 2) { warp_ptr = &X[block_tile_j * ROW_BIT]; } else if (warpId == 3) { warp_ptr = &X[block_tile_j * ROW_BIT + X_bit_offset]; } // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId*32 + laneId; // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. // int4 *lane_ptr = (int4 *)(warp_ptr + tile_k * (K/128) + // (laneId / CHUNK_COPY_LINE_LANES) * (K_GLOBAL/128)) + // (laneId % CHUNK_COPY_LINE_LANES); // (K/128), since K=128 in bit. int4 is 128 bit. int4 *lane_ptr = (int4*)(warp_ptr + laneId*ROW_BIT + tile_k); // (K/128), since K=128 in bit. int4 is 128 bit. if (warpId < 4) { // Copy 16 bytes at once in each lane. shmem[shmem_idx][0] = *lane_ptr; } // U4 tmp_probe; // tmp_probe.vec = *lane_ptr; // printf("tmp_probe.a[0]: %d, tmp_probe.a[1]: %d, tmp_probe.a[2]: %d, tmp_probe.a[3]: %d\n", tmp_probe.a[0], tmp_probe.a[1], tmp_probe.a[2], tmp_probe.a[3]); __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<128; i++) { // printf("Load from GL. i: %d, val: %x %x %x %x \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int t=0; t<a[i].num_elements; t++) { // printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]); // } // printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step); // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); } // This pointer is used to access the C and D matrix tiles this warp computes. 
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 64 * 16 + (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8; wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT); } } __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=62; i<64; i++) { // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j)); // } // } // } // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. // int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO. size_t idx = warpId * 8 * 64 + (laneId%8) * 4 + (laneId/8)*2*64; int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+idx; U4 tmp0; U4 tmp1; U4 val[2]; #pragma unroll for (int i = 0; i < 2; i++) { tmp0.vec = *((int4*)shmem_warp_stream_ptr); tmp1.vec = *((int4*)shmem_warp_stream_ptr+8); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i = 0; i < 4; i++) { // printf("tmp0.a[%d], %d ", 3-i, tmp0.a[3-i]); // } // printf("\n"); // for(int i = 0; i < 4; i++) { // printf("tmp1.a[%d], %d ", 3-i, tmp1.a[3-i]); // } // printf("\n"); // } val[i].a[0] = tmp0.a[0] + 2*tmp1.a[0]; val[i].a[1] = tmp0.a[1] + 2*tmp1.a[1]; val[i].a[2] = tmp0.a[2] + 2*tmp1.a[2]; val[i].a[3] = tmp0.a[3] + 2*tmp1.a[3]; // if (warpId == 7) { // printf("warpId: %d, laneId: %d, idx: %d, val[%d].a: %d, %d, %d, %d, tmp0: %d %d %d %d, tmp1: %d %d %d %d \n", warpId, laneId, idx, i, val[i].a[0], val[i].a[1], val[i].a[2], val[i].a[3], tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3], tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3] ); // } shmem_warp_stream_ptr += 64; } __syncthreads(); // This warp's pointer to the C matrix data to copy memory from to shared memory. // TODO: May be moved outside the for loop. size_t gmem_idx = block_tile_i*N_GLOBAL + block_tile_j + warpId*8*N_GLOBAL + (laneId%8)*4 + (laneId/8)*2*N_GLOBAL; // printf("block_tile_i: %d, block_tile_j: %d, warpId: %d, laneId: %d, gmem_idx: %d\n", block_tile_i, block_tile_j, warpId, laneId, gmem_idx); // Now that shared memory contains all the D tiles, stream them to global memory. 
int *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < 2; i++) { *((int4 *)(dst_gmem_warp_stream_ptr + i*N_GLOBAL)) = val[i].vec; } __syncthreads(); } } void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){ int *W_int = (int*) W; int *X_int = (int*) X; for(int b=0; b<W_BIT; b++) { for(int i = 0; i < M_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i; W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } for(int b = 0; b<X_BIT; b++) { for(int i = 0; i < N_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // X_int[i*K_GLOBAL/32+j] = i*M_GLOBAL + j; X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } } int popcnt(int i) { // Java: use int, and use >>> instead of >> // C or C++: use int i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) { int *W_int = (int*) W; int *X_int = (int*) X; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } ref_C[m*N_GLOBAL+n]= tmp; } } } void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. int *W_int = (int*) W; int *X_int = (int*) X; int C_ref_before_decompose[M_GLOBAL*N_GLOBAL]; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } C_ref_before_decompose[m*K_GLOBAL+n]= tmp; } } for(int m=0; m<M_GLOBAL; m++) { for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) { int val[OUT_BIT]; for(int b=0; b<OUT_BIT; b++) val[b] = 0; for(int n=0; n<32; n++) { int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n]; tmp = (tmp - 128); // Can be modified for other quantized parameters. 
for(int b=0; b<OUT_BIT; b++) { int mask = 1; val[b] = val[b] << 1; val[b] = val[b] | ((mask<<b) & tmp); } } for(int b=0; b<OUT_BIT; b++) { ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b]; } } } } void validate_results(int *C, int* ref_C, int M_, int N_) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < M_; i++) { for(int j = 0; j < N_; j++) { int idx = i*N_+j; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result with pack for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int m = 0; m < M_; m++) { for(int n_tile = 0; n_tile < N_/32; n_tile++) { for(int b=0; b<OUT_BIT; b++) { int idx = b*M_*N_/32 + m*N_/32+n_tile; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } #define verify_output int main(int argc, char **argv) { int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); int X_BIT = 2; int W_BIT = 1; for (int M_GLOBAL=128; M_GLOBAL<=1024; M_GLOBAL += 128 ) { // int M_GLOBAL = 256; int N_GLOBAL = M_GLOBAL; int K_GLOBAL = M_GLOBAL; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT)); checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL)); #ifdef verify_output int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT); X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT); Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); printf("Preparing validation data for GPU...\n"); init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, cudaMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(cudaFuncSetAttribute( apmm_w1a2, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. 
float bmma_ms_avg = 0.0f; int NUM_PROFILES = 200; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; cudaEvent_t bmma_start; cudaEvent_t bmma_end; cudaEventCreate(&bmma_start); cudaEventCreate(&bmma_end); cudaEventRecord(bmma_start); checkKernelErrors( (apmm_w1a2<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT))); cudaEventRecord(bmma_end); cudaEventSynchronize(bmma_end); cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end); cudaEventDestroy(bmma_start); cudaEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES; printf("V30, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)(M_GLOBAL) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12); #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost)); int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); /* Copmpute reference matrix on CPU */ compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); /* validation results */ validate_results(Output_h, Output_ref, M_GLOBAL, N_GLOBAL); free(W_h); free(X_h); free(Output_h); free(Output_ref); #endif checkCudaErrors(cudaFree(reinterpret_cast<void *>(W))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(X))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output))); } return EXIT_SUCCESS; }
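// A minimal sketch of the persistent-block scheduling used by apmm_w1a2 above:
// the grid is sized to one block per SM (deviceProp.multiProcessorCount) and each
// block strides through the logical tile space with block_pos += gridDim.x until
// all tiles are processed. process_tiles and its body are hypothetical placeholders.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void process_tiles(float *out, int num_tiles) {
    for (int tile = blockIdx.x; tile < num_tiles; tile += gridDim.x)
        if (threadIdx.x == 0)
            out[tile] = (float)tile;             // stand-in for the per-tile work
}

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    const int num_tiles = 4096;
    float *d_out;
    cudaMalloc(&d_out, num_tiles * sizeof(float));
    process_tiles<<<prop.multiProcessorCount, 256>>>(d_out, num_tiles);
    cudaDeviceSynchronize();
    printf("%s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_out);
    return 0;
}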
598c0a79003e54a66e6726b28822bc5623b1bd81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> const int SHARED_MEM_SIZE = 128*sizeof(int); __global__ void ReverseFunc(int *a, int *r, int N){ __shared__ int sh[SHARED_MEM_SIZE]; int id = threadIdx.x + blockDim.x*blockIdx.x; sh[threadIdx.x] = a[id]; __syncthreads(); r[id] = sh[blockDim.x-threadIdx.x-1]; } int main(){ int *a, *r; int *d_a, *d_r; int N = 1024; int size = N*sizeof(int); a = (int*)malloc(size); r = (int*)malloc(size); hipMalloc(&d_a, size); hipMalloc(&d_r, size); for(int i = 0; i < N; i++){a[i] = i;} hipMemcpy(d_a,a,size,hipMemcpyHostToDevice); int threadsPerBlock = 64; int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock; hipLaunchKernelGGL(( ReverseFunc), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_a, d_r, N); // hipDeviceSynchronize(); hipMemcpy(r,d_r,size,hipMemcpyDeviceToHost); // for(int i = 0; i< N; i++){std::cout << r[i] << std::endl;} free(a); free(r); hipFree(d_a); hipFree(d_r); return 0; }
598c0a79003e54a66e6726b28822bc5623b1bd81.cu
#include <iostream>

// Note: this is a byte count (128 * sizeof(int) = 512), but it is used below as an
// element count, so the shared array holds 512 ints; only blockDim.x entries are used.
const int SHARED_MEM_SIZE = 128 * sizeof(int);

// Reverses each block-sized chunk of `a` into `r` via shared memory.
__global__ void ReverseFunc(int *a, int *r, int N){
    __shared__ int sh[SHARED_MEM_SIZE];
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    sh[threadIdx.x] = a[id];
    __syncthreads();
    r[id] = sh[blockDim.x - threadIdx.x - 1];
}

int main(){
    int *a, *r;
    int *d_a, *d_r;
    int N = 1024;
    int size = N * sizeof(int);

    a = (int*)malloc(size);
    r = (int*)malloc(size);
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_r, size);

    for(int i = 0; i < N; i++){ a[i] = i; }
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);

    int threadsPerBlock = 64;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    ReverseFunc<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_r, N);
    // cudaThreadSynchronize();

    cudaMemcpy(r, d_r, size, cudaMemcpyDeviceToHost);
    // for(int i = 0; i < N; i++){ std::cout << r[i] << std::endl; }

    free(a);
    free(r);
    cudaFree(d_a);
    cudaFree(d_r);
    return 0;
}
ea1ba10c1718e2839d737c7a35f7048a9a3eaa5c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
During execution there is a finer grouping of threads into warps. Multiprocessors on the
GPU execute instructions for each warp in SIMD (Single Instruction Multiple Data) fashion.
The warp size (effectively the SIMD width) of all current CUDA-capable GPUs is 32 threads.
Grouping of threads into warps is not only relevant to computation, but also to global
memory accesses. The device coalesces global memory loads and stores issued by threads of
a warp into as few transactions as possible to minimize DRAM bandwidth.
*/
#include <iostream>
#include <assert.h>
#include <cstring>   // strcmp, strncmp
#include <cstdlib>   // atoi
#include <string>    // std::string

// Wrapper for cuda status
inline hipError_t checkCuda(hipError_t result){
#if defined(DEBUG) || defined(_DEBUG)
    if(result != hipSuccess){
        std::cout << " Cuda Runtime Error: " << result << std::endl;
        assert(result == hipSuccess);
    }
#endif
    return result;
}

// Each thread increments one element, shifted by `stride` elements (misaligned access).
template<typename T>
__global__ void offset(T* vector, int stride){
    int index = blockDim.x * blockIdx.x + threadIdx.x + stride;
    vector[index]++;
}

// Each thread increments one element, `stride` elements apart (strided access).
template<typename T>
__global__ void stride(T* vector, int stride){
    int index = (blockDim.x * blockIdx.x + threadIdx.x) * stride;
    vector[index]++;
}

template<typename T>
void runTest(int device_id, size_t size_MB){
    size_t block_size = 256;
    float milliseconds;
    T *d_a;
    // Number of elements of T in size_MB megabytes (despite the name, not a byte count).
    size_t size_bytes = (size_MB * 1024 * 1024) / sizeof(T);
    // Allocate 33x the working set so offsets/strides up to 32 stay in bounds.
    checkCuda(hipMalloc(&d_a, size_bytes * 33 * sizeof(T)));

    hipEvent_t startEvent, stopEvent;
    checkCuda(hipEventCreate(&startEvent));
    checkCuda(hipEventCreate(&stopEvent));

    std::cout << " Offset - Bandwidth (GB/s): " << '\n';
    hipLaunchKernelGGL(offset, dim3(size_bytes / block_size), dim3(block_size), 0, 0, d_a, 0); // warm up
    for(size_t i = 0; i != 32; ++i){   // i == 0 is the aligned baseline case
        checkCuda(hipMemset(d_a, 0, size_bytes * sizeof(T)));
        checkCuda(hipEventRecord(startEvent, 0));
        hipLaunchKernelGGL(offset, dim3(size_bytes / block_size), dim3(block_size), 0, 0, d_a, i);
        checkCuda(hipEventRecord(stopEvent, 0));
        checkCuda(hipEventSynchronize(stopEvent));
        checkCuda(hipEventElapsedTime(&milliseconds, startEvent, stopEvent));
        std::cout << " " << i << " - " << (2 * size_MB) / milliseconds << std::endl;
    }

    std::cout << " Stride - Bandwidth (GB/s): " << std::endl;
    hipLaunchKernelGGL(stride, dim3(size_bytes / block_size), dim3(block_size), 0, 0, d_a, 0); // warm up
    for(size_t i = 0; i != 32; ++i){   // i == 0 is a degenerate case: every thread hits element 0
        checkCuda(hipMemset(d_a, 0, size_bytes * sizeof(T)));
        checkCuda(hipEventRecord(startEvent, 0));
        hipLaunchKernelGGL(stride, dim3(size_bytes / block_size), dim3(block_size), 0, 0, d_a, i);
        checkCuda(hipEventRecord(stopEvent, 0));
        checkCuda(hipEventSynchronize(stopEvent));
        checkCuda(hipEventElapsedTime(&milliseconds, startEvent, stopEvent));
        std::cout << " " << i << " - " << (2 * size_MB) / milliseconds << std::endl;
    }

    // Clean up
    checkCuda(hipEventDestroy(startEvent));
    checkCuda(hipEventDestroy(stopEvent));
    checkCuda(hipFree(d_a));
}

int main(int argc, char* argv[]){
    size_t size_MB = 4;
    int device_id = 0;
    bool is_doublePrecision = false;

    for(size_t i = 1; i != argc; ++i){
        if(!strncmp(argv[i], "dev=", 4)){
            device_id = atoi((char*)(&argv[i][4]));
        }
        else if(!strcmp(argv[i], "fp64")){
            is_doublePrecision = true;
        }
    }

    hipDeviceProp_t device;
    checkCuda(hipSetDevice(device_id));
    checkCuda(hipGetDeviceProperties(&device, device_id));

    std::cout << "\n Device: " << device.name << '\n';
    std::cout << " Transfer size (MB): " << size_MB << '\n';

    std::string precision = (is_doublePrecision) ? "Double" : "Single";
    std::cout << " Precision : " << precision << '\n';

    if(is_doublePrecision) runTest<double>(device_id, size_MB);
    else                   runTest<float>(device_id, size_MB);

    return 0;
}
ea1ba10c1718e2839d737c7a35f7048a9a3eaa5c.cu
/*
During execution there is a finer grouping of threads into warps. Multiprocessors on the
GPU execute instructions for each warp in SIMD (Single Instruction Multiple Data) fashion.
The warp size (effectively the SIMD width) of all current CUDA-capable GPUs is 32 threads.
Grouping of threads into warps is not only relevant to computation, but also to global
memory accesses. The device coalesces global memory loads and stores issued by threads of
a warp into as few transactions as possible to minimize DRAM bandwidth.
*/
#include <iostream>
#include <assert.h>
#include <cstring>   // strcmp, strncmp
#include <cstdlib>   // atoi
#include <string>    // std::string

// Wrapper for cuda status
inline cudaError_t checkCuda(cudaError_t result){
#if defined(DEBUG) || defined(_DEBUG)
    if(result != cudaSuccess){
        std::cout << " Cuda Runtime Error: " << result << std::endl;
        assert(result == cudaSuccess);
    }
#endif
    return result;
}

// Each thread increments one element, shifted by `stride` elements (misaligned access).
template<typename T>
__global__ void offset(T* vector, int stride){
    int index = blockDim.x * blockIdx.x + threadIdx.x + stride;
    vector[index]++;
}

// Each thread increments one element, `stride` elements apart (strided access).
template<typename T>
__global__ void stride(T* vector, int stride){
    int index = (blockDim.x * blockIdx.x + threadIdx.x) * stride;
    vector[index]++;
}

template<typename T>
void runTest(int device_id, size_t size_MB){
    size_t block_size = 256;
    float milliseconds;
    T *d_a;
    // Number of elements of T in size_MB megabytes (despite the name, not a byte count).
    size_t size_bytes = (size_MB * 1024 * 1024) / sizeof(T);
    // Allocate 33x the working set so offsets/strides up to 32 stay in bounds.
    checkCuda(cudaMalloc(&d_a, size_bytes * 33 * sizeof(T)));

    cudaEvent_t startEvent, stopEvent;
    checkCuda(cudaEventCreate(&startEvent));
    checkCuda(cudaEventCreate(&stopEvent));

    std::cout << " Offset - Bandwidth (GB/s): " << '\n';
    offset<<<size_bytes / block_size, block_size>>>(d_a, 0); // warm up
    for(size_t i = 0; i != 32; ++i){   // i == 0 is the aligned baseline case
        checkCuda(cudaMemset(d_a, 0, size_bytes * sizeof(T)));
        checkCuda(cudaEventRecord(startEvent, 0));
        offset<<<size_bytes / block_size, block_size>>>(d_a, i);
        checkCuda(cudaEventRecord(stopEvent, 0));
        checkCuda(cudaEventSynchronize(stopEvent));
        checkCuda(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
        std::cout << " " << i << " - " << (2 * size_MB) / milliseconds << std::endl;
    }

    std::cout << " Stride - Bandwidth (GB/s): " << std::endl;
    stride<<<size_bytes / block_size, block_size>>>(d_a, 0); // warm up
    for(size_t i = 0; i != 32; ++i){   // i == 0 is a degenerate case: every thread hits element 0
        checkCuda(cudaMemset(d_a, 0, size_bytes * sizeof(T)));
        checkCuda(cudaEventRecord(startEvent, 0));
        stride<<<size_bytes / block_size, block_size>>>(d_a, i);
        checkCuda(cudaEventRecord(stopEvent, 0));
        checkCuda(cudaEventSynchronize(stopEvent));
        checkCuda(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
        std::cout << " " << i << " - " << (2 * size_MB) / milliseconds << std::endl;
    }

    // Clean up
    checkCuda(cudaEventDestroy(startEvent));
    checkCuda(cudaEventDestroy(stopEvent));
    checkCuda(cudaFree(d_a));
}

int main(int argc, char* argv[]){
    size_t size_MB = 4;
    int device_id = 0;
    bool is_doublePrecision = false;

    for(size_t i = 1; i != argc; ++i){
        if(!strncmp(argv[i], "dev=", 4)){
            device_id = atoi((char*)(&argv[i][4]));
        }
        else if(!strcmp(argv[i], "fp64")){
            is_doublePrecision = true;
        }
    }

    cudaDeviceProp device;
    checkCuda(cudaSetDevice(device_id));
    checkCuda(cudaGetDeviceProperties(&device, device_id));

    std::cout << "\n Device: " << device.name << '\n';
    std::cout << " Transfer size (MB): " << size_MB << '\n';

    std::string precision = (is_doublePrecision) ? "Double" : "Single";
    std::cout << " Precision : " << precision << '\n';

    if(is_doublePrecision) runTest<double>(device_id, size_MB);
    else                   runTest<float>(device_id, size_MB);

    return 0;
}
d48b7cd0b0e1374567a6003c607c64887ce1303d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void add(int *c, int *a, int *b) {
    *c = *a + *b;
}

int main() {
    int a = 5, b = 4, c;   // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c

    // Allocate space for device copies of a, b, c
    hipMalloc(&d_a, sizeof(int));
    hipMalloc(&d_b, sizeof(int));
    hipMalloc(&d_c, sizeof(int));

    // Copy inputs to device
    hipMemcpy(d_a, &a, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, &b, sizeof(int), hipMemcpyHostToDevice);

    // Launch add() kernel on GPU
    hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, d_c, d_a, d_b);

    // Copy result back to host
    hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost);

    // Cleanup
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    printf("Result: %d\n", c);
    return 0;
}
d48b7cd0b0e1374567a6003c607c64887ce1303d.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void add(int *c, int *a, int *b) { *c = *a + *b; } int main() { int a = 5, b = 4, c; // host copies of a, b, c int *d_a, *d_b, *d_c; // device copies of a, b, c // Allocate space for device copies of a, b, c cudaMalloc(&d_a, sizeof(int)); cudaMalloc(&d_b, sizeof(int)); cudaMalloc(&d_c, sizeof(int)); // Copy inputs to device cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice); // Launch add() kernel on GPU add<<<1, 1>>>(d_c, d_a, d_b); // Copy result back to host cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost); // Cleanup cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); printf("Result: %d\n", c); return 0; }
220a9a5dfeb024e25d8e276cf9d82abba2de9cf9.hip
// !!! This is a file automatically generated by hipify!!! // ConsoleApplication2.cpp : Defines the entry point for the console application. // #include<iostream> #include<math.h> #include<conio.h> #include<stdlib.h> #include<vector> #include<algorithm> #include<map> #include<iterator> #include <fstream> #include <streambuf> #include<string> #include <dirent.h> #include <boost/algorithm/string.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <random> int D = 10000; int N = 3; std::vector<int> genRandomHV() { std::vector<int> randomIndex(D); std::vector<int> randomHV(D); std::mt19937 r{ std::random_device{}() }; if ((D % 2) == 1) { std::cout << "Dimension is odd"; } else { for (int i = 0; i < D; i++) randomIndex[i] = i; std::shuffle(randomIndex.begin(), randomIndex.end(), r); for (int i = 0;i < D / 2;i++) randomHV[randomIndex[i]] = 1; for (int i = D / 2;i < D;i++) randomHV[randomIndex[i]] = -1; } return randomHV; } std::map<char, std::vector<int>> createItemMemory(std::map<char, std::vector<int>> iM) { iM['a'] = genRandomHV(); iM['b'] = genRandomHV(); iM['c'] = genRandomHV(); iM['d'] = genRandomHV(); iM['e'] = genRandomHV(); iM['f'] = genRandomHV(); iM['g'] = genRandomHV(); iM['h'] = genRandomHV(); iM['i'] = genRandomHV(); iM['j'] = genRandomHV(); iM['k'] = genRandomHV(); iM['l'] = genRandomHV(); iM['m'] = genRandomHV(); iM['n'] = genRandomHV(); iM['o'] = genRandomHV(); iM['p'] = genRandomHV(); iM['q'] = genRandomHV(); iM['r'] = genRandomHV(); iM['s'] = genRandomHV(); iM['t'] = genRandomHV(); iM['u'] = genRandomHV(); iM['v'] = genRandomHV(); iM['w'] = genRandomHV(); iM['x'] = genRandomHV(); iM['y'] = genRandomHV(); iM['z'] = genRandomHV(); iM[char(32)] = genRandomHV(); return iM; } std::vector<int> lookUpitemMemory(std::map<char, std::vector<int>> iM, char key) { std::vector<int> randomHV(D); randomHV = iM[key]; return randomHV; } double cosine_similarity(std::vector<int> A, std::vector<int> B) { double dot = 0.0, denom_a = 0.0, denom_b = 0.0; for (int i = 0; i < D; ++i) { dot += A[i] * B[i]; denom_a += A[i] * A[i]; denom_b += B[i] * B[i]; } return (dot / (sqrt(denom_a) * sqrt(denom_b))); } std::vector<int> binarizeHV(std::vector<int> langHV) { int threshold = 0; /*for (size_t i = 0; i < langHV.size(); i++) { std::cout << langHV[i] << std::endl; }*/ for (int i = 0; i < D; i++) { if (langHV[i] > threshold) { langHV[i] = 1; } else { langHV[i] = -1; } } /*for (size_t i = 0; i < langHV.size(); i++) { std::cout << langHV[i] << std::endl; }*/ return langHV; } std::vector<int> computeSumHV(std::map<char, std::vector<int>> iM, size_t bufferSize, std::string buffer) { //std::vector<int> h_block0(D, 1); thrust::device_vector<int> st_block2(D, 1); thrust::device_vector<int> st_block3(D, 1); thrust::device_vector<int> st_block4(D, 1); thrust::device_vector<int> st_block5(D, 1); thrust::device_vector<int> st_block6(D, 1); thrust::device_vector<int> st_block7(D, 1); thrust::device_vector<int> block0(D, 1); thrust::device_vector<int> block1(D, 1); thrust::device_vector<int> block2(D, 1); thrust::device_vector<int> block3(D, 1); thrust::device_vector<int> block4(D, 1); thrust::device_vector<int> block5(D, 1); thrust::device_vector<int> block6(D, 1); thrust::device_vector<int> block7(D, 1); thrust::device_vector<int> nGrams(D, 1); thrust::device_vector <int > d_sumHV(D, 0); std::vector<int> sumHV(D, 0); if (N == 3) { for (size_t j = 0; j < bufferSize; j++) 
{ int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block2 = lookUpitemMemory(iM, key); thrust::copy(st_block2.begin(), st_block2.end(), block2.begin()); if (j >= 2) { thrust::copy(block2.begin(), block2.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 4) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block3 = lookUpitemMemory(iM, key); thrust::copy(st_block3.begin(), st_block3.end(), block3.begin()); if (j >= 3) { thrust::copy(block3.begin(), block3.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 5) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = 
block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block4 = lookUpitemMemory(iM, key); thrust::copy(st_block4.begin(), st_block4.end(), block4.begin()); if (j >= 4) { thrust::copy(block4.begin(), block4.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 6) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); thrust::copy(block5.begin() + 1, block5.end(), block4.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block5 = lookUpitemMemory(iM, key); thrust::copy(st_block5.begin(), st_block5.end(), block5.begin()); if (j >= 5) { thrust::copy(block5.begin(), block5.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block4.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 7) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); 
thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); thrust::copy(block5.begin() + 1, block5.end(), block4.begin()); thrust::copy(block6.begin() + 1, block6.end(), block5.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block6 = lookUpitemMemory(iM, key); thrust::copy(st_block6.begin(), st_block6.end(), block6.begin()); if (j >= 6) { thrust::copy(block6.begin(), block6.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block5.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block4.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 8) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); thrust::copy(block5.begin() + 1, block5.end(), block4.begin()); thrust::copy(block6.begin() + 1, block6.end(), block5.begin()); thrust::copy(block7.begin() + 1, block7.end(), block6.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block7 = lookUpitemMemory(iM, key); thrust::copy(st_block7.begin(), st_block7.end(), block7.begin()); if (j >= 7) { thrust::copy(block7.begin(), block7.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block6.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block5.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block4.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), 
thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } thrust::copy(d_sumHV.begin(), d_sumHV.end(), sumHV.begin()); return sumHV; } std::map<std::string, std::vector<int>> buildLanguage(std::map<char, std::vector<int>> iM) { std::map< std::string, std::vector<int>> langAM; std::vector<int> langHV(D); //size_t size = 0; //char *ch = NULL; //FILE *file = NULL; /*errno_t err;*/ std::string langLabels[8]; langLabels[0] = "acq"; langLabels[1] = "cru"; langLabels[2] = "ear"; langLabels[3] = "gra"; langLabels[4] = "int"; langLabels[5] = "mon"; langLabels[6] = "shi"; langLabels[7] = "tra"; //std::string langText; //langText = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\acq.txt"; /*langText[1] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\cru.txt"; langText[2] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\gra.txt"; langText[3] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\int.txt"; langText[4] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\mon.txt"; langText[5] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\ear.txt"; langText[6] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\shi.txt"; langText[7] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\tra.txt"; */ for (int i = 0; i < 8; i++) { /*int TempNumOne = langLabels[i].size(); char Filename[100]; for (int a = 0;a <= TempNumOne;a++) { Filename[a] = langText[a]; } */ switch (i) { case 0: {std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\acq.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV (langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 1: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\cru.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); 
/*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 2: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\ear.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 3: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\gra.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 4: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\int.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 5: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\mon.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 6: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 
2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\shi.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 7: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\tra.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } /*} std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\acq.txt"); std::string str; t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg); std::ifstream t("file.txt"); t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file); std::cout << size; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = langHV; /*printf("%s\n", buffer.c_str());*/ } } return langAM; /* for(int i=0;i<size;i++) { printf("%c",ch[i]); } */ } double test(std::map<char, std::vector<int>> iM, std::map<std::string, std::vector<int>> langAM) { double total = 0.0; double correct = 0.0; double accuracy = 0; double maxAngle, angle = 0; std::string predictLang; std::vector<int> textHV; std::string langLabels[8]; langLabels[0] = "acq"; langLabels[1] = "cru"; langLabels[2] = "gra"; langLabels[3] = "int"; langLabels[4] = "mon"; langLabels[5] = "ear"; langLabels[6] = "shi"; langLabels[7] = "tra"; DIR *pdir = NULL; // remember, it's good practice to initialise a pointer to NULL! pdir = opendir ("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\testing_texts"); // "." will refer to the current directory struct dirent *pent = NULL; // I used the current directory, since this is one which will apply to anyone reading // this tutorial~ If I said "C:\\" and you're on Linux, it may get a little confusing! if (pdir == NULL) // if pdir wasn't initialised correctly { // print an error message and exit the program std::cout << "\nERROR! pdir could not be initialised correctly"; exit (3); } // end if while (pent = readdir (pdir)) // while there is still something in the directory to list { if (pent == NULL) // if pent has not been initialised correctly { // print an error message, and exit the program std::cout << "\nERROR! 
pent could not be initialised correctly"; exit (3); } if (strcmp(pent->d_name, ".") != 0 && strcmp(pent->d_name, "..") != 0) // otherwise, it was initialised correctly. Let's print it on the console: //if (pent->d_name == ".") //{ //continue; //} //else { std::cout << pent->d_name << std::endl; std::string name = pent->d_name; std::string actualLabel = name.substr(0, 3); std::vector<std::string> list{ "C:", "Users", "Mohammed Aashyk", "Documents", "Visual Studio 2015", "Projects", "ConsoleApplication2", "ConsoleApplication2", "Debug", "testing_texts", name }; //std::cout << name; std::string joined = boost::algorithm::join(list, "\\"); //std::cout << joined; std::ifstream t(joined); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Loading test file:" << pent->d_name << std::endl; textHV = computeSumHV(iM, size, buffer); textHV = binarizeHV(textHV); maxAngle = -1; for (int i = 0; i < 8; i++) { angle = cosine_similarity(langAM[langLabels[i]], textHV); if (angle > maxAngle) { maxAngle = angle; predictLang = langLabels[i]; } } if (predictLang == actualLabel) { correct = correct + 1.0; } else { std::cout << predictLang << " --> " << actualLabel << std::endl; } } total = total + 1.0; } //std::cout << pent->d_name; // finally, let's close the directory closedir (pdir); //std::cin.get (); // pause for input // return EXIT_SUCCESS; // everything went OK accuracy = correct / total * 100; return accuracy; } void printPair(const std::pair<char, std::vector<int> > &p) { std::cout << "Key: " << p.first << std::endl; copy(p.second.begin(), p.second.end(), std::ostream_iterator<int>(std::cout, "\t")); } int main() { std::vector<int> rand; std::map<char, std::vector<int>> iM; std::map<std::string, std::vector<int>> langAM; double correct; iM = createItemMemory(iM); //for_each(iM.begin(), iM.end(), printPair); //std::cout << "rand contains:"; //for (std::vector<int>::iterator it = rand.begin(); it != rand.end(); ++it) // std::cout << ' ' << *it; langAM = buildLanguage(iM); /*std::map<std::string, std::vector<int>>::iterator pos; for (pos = langAM.begin(); pos != langAM.end(); ++pos) { std::cout << "key: \"" << pos->first << "\" " << std::endl << "values: \"" ; typedef std::vector<int>::const_iterator ListIterator; for (ListIterator list_iter = pos->second.begin(); list_iter != pos->second.end(); list_iter++) std::cout << " " << *list_iter << std::endl; } /* for (std::map<std::string, std::vector<long int>> ::const_iterator it = langAM.begin();// it != langAM.end(); ++it) { std::cout << it->first << " " << it->second.first << " " << it->second.second << "\n"; }*/ correct = test(iM, langAM); /*std::map<std::string, std::vector<int>>::iterator pos; for (pos = langAM.begin(); pos != langAM.end(); ++pos) { std::cout << "key: \"" << pos->first << "\" " << std::endl << "values: \"" ; typedef std::vector<int>::const_iterator ListIterator; for (ListIterator list_iter = pos->second.begin(); list_iter != pos->second.end(); list_iter++) std::cout << " " << *list_iter << std::endl; } /* for (std::map<std::string, std::vector<long int>> ::const_iterator it = langAM.begin(); it != langAM.end(); ++it) { std::cout << it->first << " " << it->second.first << " " << 
it->second.second << "\n"; }*/ std::cout << correct << "%" << std::endl << "Run Success!"; }
220a9a5dfeb024e25d8e276cf9d82abba2de9cf9.cu
// ConsoleApplication2.cpp : Defines the entry point for the console application. // #include<iostream> #include<math.h> #include<conio.h> #include<stdlib.h> #include<vector> #include<algorithm> #include<map> #include<iterator> #include <fstream> #include <streambuf> #include<string> #include <dirent.h> #include <boost/algorithm/string.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/functional.h> #include <random> int D = 10000; int N = 3; std::vector<int> genRandomHV() { std::vector<int> randomIndex(D); std::vector<int> randomHV(D); std::mt19937 r{ std::random_device{}() }; if ((D % 2) == 1) { std::cout << "Dimension is odd"; } else { for (int i = 0; i < D; i++) randomIndex[i] = i; std::shuffle(randomIndex.begin(), randomIndex.end(), r); for (int i = 0;i < D / 2;i++) randomHV[randomIndex[i]] = 1; for (int i = D / 2;i < D;i++) randomHV[randomIndex[i]] = -1; } return randomHV; } std::map<char, std::vector<int>> createItemMemory(std::map<char, std::vector<int>> iM) { iM['a'] = genRandomHV(); iM['b'] = genRandomHV(); iM['c'] = genRandomHV(); iM['d'] = genRandomHV(); iM['e'] = genRandomHV(); iM['f'] = genRandomHV(); iM['g'] = genRandomHV(); iM['h'] = genRandomHV(); iM['i'] = genRandomHV(); iM['j'] = genRandomHV(); iM['k'] = genRandomHV(); iM['l'] = genRandomHV(); iM['m'] = genRandomHV(); iM['n'] = genRandomHV(); iM['o'] = genRandomHV(); iM['p'] = genRandomHV(); iM['q'] = genRandomHV(); iM['r'] = genRandomHV(); iM['s'] = genRandomHV(); iM['t'] = genRandomHV(); iM['u'] = genRandomHV(); iM['v'] = genRandomHV(); iM['w'] = genRandomHV(); iM['x'] = genRandomHV(); iM['y'] = genRandomHV(); iM['z'] = genRandomHV(); iM[char(32)] = genRandomHV(); return iM; } std::vector<int> lookUpitemMemory(std::map<char, std::vector<int>> iM, char key) { std::vector<int> randomHV(D); randomHV = iM[key]; return randomHV; } double cosine_similarity(std::vector<int> A, std::vector<int> B) { double dot = 0.0, denom_a = 0.0, denom_b = 0.0; for (int i = 0; i < D; ++i) { dot += A[i] * B[i]; denom_a += A[i] * A[i]; denom_b += B[i] * B[i]; } return (dot / (sqrt(denom_a) * sqrt(denom_b))); } std::vector<int> binarizeHV(std::vector<int> langHV) { int threshold = 0; /*for (size_t i = 0; i < langHV.size(); i++) { std::cout << langHV[i] << std::endl; }*/ for (int i = 0; i < D; i++) { if (langHV[i] > threshold) { langHV[i] = 1; } else { langHV[i] = -1; } } /*for (size_t i = 0; i < langHV.size(); i++) { std::cout << langHV[i] << std::endl; }*/ return langHV; } std::vector<int> computeSumHV(std::map<char, std::vector<int>> iM, size_t bufferSize, std::string buffer) { //std::vector<int> h_block0(D, 1); thrust::device_vector<int> st_block2(D, 1); thrust::device_vector<int> st_block3(D, 1); thrust::device_vector<int> st_block4(D, 1); thrust::device_vector<int> st_block5(D, 1); thrust::device_vector<int> st_block6(D, 1); thrust::device_vector<int> st_block7(D, 1); thrust::device_vector<int> block0(D, 1); thrust::device_vector<int> block1(D, 1); thrust::device_vector<int> block2(D, 1); thrust::device_vector<int> block3(D, 1); thrust::device_vector<int> block4(D, 1); thrust::device_vector<int> block5(D, 1); thrust::device_vector<int> block6(D, 1); thrust::device_vector<int> block7(D, 1); thrust::device_vector<int> nGrams(D, 1); thrust::device_vector <int > d_sumHV(D, 0); std::vector<int> sumHV(D, 0); if (N == 3) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < 
block0.size(); i++) { block0[i] = block1[i]; }*/ std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block2 = lookUpitemMemory(iM, key); thrust::copy(st_block2.begin(), st_block2.end(), block2.begin()); if (j >= 2) { thrust::copy(block2.begin(), block2.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 4) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block3 = lookUpitemMemory(iM, key); thrust::copy(st_block3.begin(), st_block3.end(), block3.begin()); if (j >= 3) { thrust::copy(block3.begin(), block3.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 5) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), 
block1.end() - 1, block1.end()); st_block4 = lookUpitemMemory(iM, key); thrust::copy(st_block4.begin(), st_block4.end(), block4.begin()); if (j >= 4) { thrust::copy(block4.begin(), block4.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 6) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); thrust::copy(block5.begin() + 1, block5.end(), block4.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block5 = lookUpitemMemory(iM, key); thrust::copy(st_block5.begin(), st_block5.end(), block5.begin()); if (j >= 5) { thrust::copy(block5.begin(), block5.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block4.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 7) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); 
thrust::copy(block5.begin() + 1, block5.end(), block4.begin()); thrust::copy(block6.begin() + 1, block6.end(), block5.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block6 = lookUpitemMemory(iM, key); thrust::copy(st_block6.begin(), st_block6.end(), block6.begin()); if (j >= 6) { thrust::copy(block6.begin(), block6.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block5.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block4.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } else if (N == 8) { for (size_t j = 0; j < bufferSize; j++) { int i = j; char key = buffer[i]; /*for (size_t i = 0; i < block0.size(); i++) { block0[i] = block1[i]; }*/ //std::cout << key; thrust::copy(block1.begin() + 1, block1.end(), block0.begin()); thrust::copy(block2.begin() + 1, block2.end(), block1.begin()); thrust::copy(block3.begin() + 1, block3.end(), block2.begin()); thrust::copy(block4.begin() + 1, block4.end(), block3.begin()); thrust::copy(block5.begin() + 1, block5.end(), block4.begin()); thrust::copy(block6.begin() + 1, block6.end(), block5.begin()); thrust::copy(block7.begin() + 1, block7.end(), block6.begin()); //rotate(h_block1.begin(), h_block1.end() - 1, h_block0.end()); /*for (size_t i = 0; i < block1.size(); i++) { block1[i] = block2[i]; }*/ //block1 = block2; //rotate(block1.begin(), block1.end() - 1, block1.end()); st_block7 = lookUpitemMemory(iM, key); thrust::copy(st_block7.begin(), st_block7.end(), block7.begin()); if (j >= 7) { thrust::copy(block7.begin(), block7.end(), nGrams.begin()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] = block2[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block6.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block1[i]; //} thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block5.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block4.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block3.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block2.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), nGrams.end(), nGrams.begin(), block1.begin(), thrust::multiplies<int>()); thrust::transform(nGrams.begin(), 
nGrams.end(), nGrams.begin(), block0.begin(), thrust::multiplies<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // std::cout << nGrams[i] << std::endl; //} //for (size_t i = 0; i < nGrams.size(); i++) { // nGrams[i] *= block0[i]; //} thrust::transform(d_sumHV.begin(), d_sumHV.end(), nGrams.begin(), d_sumHV.begin(), thrust::plus<int>()); //for (size_t i = 0; i < nGrams.size(); i++) { // sumHV[i] += nGrams[i]; //} } } } thrust::copy(d_sumHV.begin(), d_sumHV.end(), sumHV.begin()); return sumHV; } std::map<std::string, std::vector<int>> buildLanguage(std::map<char, std::vector<int>> iM) { std::map< std::string, std::vector<int>> langAM; std::vector<int> langHV(D); //size_t size = 0; //char *ch = NULL; //FILE *file = NULL; /*errno_t err;*/ std::string langLabels[8]; langLabels[0] = "acq"; langLabels[1] = "cru"; langLabels[2] = "ear"; langLabels[3] = "gra"; langLabels[4] = "int"; langLabels[5] = "mon"; langLabels[6] = "shi"; langLabels[7] = "tra"; //std::string langText; //langText = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\acq.txt"; /*langText[1] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\cru.txt"; langText[2] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\gra.txt"; langText[3] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\int.txt"; langText[4] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\mon.txt"; langText[5] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\ear.txt"; langText[6] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\shi.txt"; langText[7] = "C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\tra.txt"; */ for (int i = 0; i < 8; i++) { /*int TempNumOne = langLabels[i].size(); char Filename[100]; for (int a = 0;a <= TempNumOne;a++) { Filename[a] = langText[a]; } */ switch (i) { case 0: {std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\acq.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV (langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 1: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\cru.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = 
(char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 2: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\ear.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 3: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\gra.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 4: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\int.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 5: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\mon.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 6: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\shi.txt"); std::string str; /*t.seekg(0, 
std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } case 7: { std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\tra.txt"); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Training File:" << langLabels[i] << std::endl; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = binarizeHV(langHV); //langAM[(langLabels[i])] = binarizeHV(langAM[(langLabels[i])]); break; } /*} std::ifstream t("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\acq.txt"); std::string str; t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg); std::ifstream t("file.txt"); t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file); std::cout << size; langHV = computeSumHV(iM, size, buffer); langAM[(langLabels[i])] = langHV; /*printf("%s\n", buffer.c_str());*/ } } return langAM; /* for(int i=0;i<size;i++) { printf("%c",ch[i]); } */ } double test(std::map<char, std::vector<int>> iM, std::map<std::string, std::vector<int>> langAM) { double total = 0.0; double correct = 0.0; double accuracy = 0; double maxAngle, angle = 0; std::string predictLang; std::vector<int> textHV; std::string langLabels[8]; langLabels[0] = "acq"; langLabels[1] = "cru"; langLabels[2] = "gra"; langLabels[3] = "int"; langLabels[4] = "mon"; langLabels[5] = "ear"; langLabels[6] = "shi"; langLabels[7] = "tra"; DIR *pdir = NULL; // remember, it's good practice to initialise a pointer to NULL! pdir = opendir ("C:\\Users\\Mohammed Aashyk\\Documents\\Visual Studio 2015\\Projects\\ConsoleApplication2\\ConsoleApplication2\\Debug\\testing_texts"); // "." will refer to the current directory struct dirent *pent = NULL; // I used the current directory, since this is one which will apply to anyone reading // this tutorial~ If I said "C:\\" and you're on Linux, it may get a little confusing! if (pdir == NULL) // if pdir wasn't initialised correctly { // print an error message and exit the program std::cout << "\nERROR! pdir could not be initialised correctly"; exit (3); } // end if while (pent = readdir (pdir)) // while there is still something in the directory to list { if (pent == NULL) // if pent has not been initialised correctly { // print an error message, and exit the program std::cout << "\nERROR! 
pent could not be initialised correctly"; exit (3); } if (strcmp(pent->d_name, ".") != 0 && strcmp(pent->d_name, "..") != 0) // otherwise, it was initialised correctly. Let's print it on the console: //if (pent->d_name == ".") //{ //continue; //} //else { std::cout << pent->d_name << std::endl; std::string name = pent->d_name; std::string actualLabel = name.substr(0, 3); std::vector<std::string> list{ "C:", "Users", "Mohammed Aashyk", "Documents", "Visual Studio 2015", "Projects", "ConsoleApplication2", "ConsoleApplication2", "Debug", "testing_texts", name }; //std::cout << name; std::string joined = boost::algorithm::join(list, "\\"); //std::cout << joined; std::ifstream t(joined); std::string str; /*t.seekg(0, std::ios::end); str.reserve(t.tellg()); t.seekg(0, std::ios::beg);*/ /*std::ifstream t("file.txt");*/ t.seekg(0, std::ios::end); size_t size = t.tellg(); std::string buffer(size, ' '); t.seekg(0); t.read(&buffer[0], size); /*fseek(file, 0, SEEK_END); size = ftell(file); rewind(file); ch = (char *)malloc((size + 1) * sizeof(ch)); fread(ch, size, 1, file);*/ std::cout << "Loading test file:" << pent->d_name << std::endl; textHV = computeSumHV(iM, size, buffer); textHV = binarizeHV(textHV); maxAngle = -1; for (int i = 0; i < 8; i++) { angle = cosine_similarity(langAM[langLabels[i]], textHV); if (angle > maxAngle) { maxAngle = angle; predictLang = langLabels[i]; } } if (predictLang == actualLabel) { correct = correct + 1.0; } else { std::cout << predictLang << " --> " << actualLabel << std::endl; } } total = total + 1.0; } //std::cout << pent->d_name; // finally, let's close the directory closedir (pdir); //std::cin.get (); // pause for input // return EXIT_SUCCESS; // everything went OK accuracy = correct / total * 100; return accuracy; } void printPair(const std::pair<char, std::vector<int> > &p) { std::cout << "Key: " << p.first << std::endl; copy(p.second.begin(), p.second.end(), std::ostream_iterator<int>(std::cout, "\t")); } int main() { std::vector<int> rand; std::map<char, std::vector<int>> iM; std::map<std::string, std::vector<int>> langAM; double correct; iM = createItemMemory(iM); //for_each(iM.begin(), iM.end(), printPair); //std::cout << "rand contains:"; //for (std::vector<int>::iterator it = rand.begin(); it != rand.end(); ++it) // std::cout << ' ' << *it; langAM = buildLanguage(iM); /*std::map<std::string, std::vector<int>>::iterator pos; for (pos = langAM.begin(); pos != langAM.end(); ++pos) { std::cout << "key: \"" << pos->first << "\" " << std::endl << "values: \"" ; typedef std::vector<int>::const_iterator ListIterator; for (ListIterator list_iter = pos->second.begin(); list_iter != pos->second.end(); list_iter++) std::cout << " " << *list_iter << std::endl; } /* for (std::map<std::string, std::vector<long int>> ::const_iterator it = langAM.begin();// it != langAM.end(); ++it) { std::cout << it->first << " " << it->second.first << " " << it->second.second << "\n"; }*/ correct = test(iM, langAM); /*std::map<std::string, std::vector<int>>::iterator pos; for (pos = langAM.begin(); pos != langAM.end(); ++pos) { std::cout << "key: \"" << pos->first << "\" " << std::endl << "values: \"" ; typedef std::vector<int>::const_iterator ListIterator; for (ListIterator list_iter = pos->second.begin(); list_iter != pos->second.end(); list_iter++) std::cout << " " << *list_iter << std::endl; } /* for (std::map<std::string, std::vector<long int>> ::const_iterator it = langAM.begin(); it != langAM.end(); ++it) { std::cout << it->first << " " << it->second.first << " " << 
it->second.second << "\n"; }*/ std::cout << correct << "%" << std::endl << "Run Success!"; }
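// Sketch, not the file's definition: test() above scores a query hypervector against each
// associative-memory entry with cosine_similarity(), which is declared earlier in the file,
// outside this excerpt. Under the usual definition (dot product divided by the product of the
// vector norms) a minimal stand-in for two integer hypervectors looks like this:
#include <cmath>
#include <cstddef>
#include <vector>

static double cosine_similarity_sketch(const std::vector<int>& a, const std::vector<int>& b) {
    double dot = 0.0, na = 0.0, nb = 0.0;
    const std::size_t n = a.size() < b.size() ? a.size() : b.size();
    for (std::size_t i = 0; i < n; ++i) {
        dot += static_cast<double>(a[i]) * b[i];
        na  += static_cast<double>(a[i]) * a[i];
        nb  += static_cast<double>(b[i]) * b[i];
    }
    return (na == 0.0 || nb == 0.0) ? 0.0 : dot / (std::sqrt(na) * std::sqrt(nb));
}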
a20b20436c3685864a73583f979c5b33a352657c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "calculate_tensors.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *SR = NULL; hipMalloc(&SR, XSIZE*YSIZE); const float *fields = NULL; hipMalloc(&fields, XSIZE*YSIZE); const float *norms = NULL; hipMalloc(&norms, XSIZE*YSIZE); const int num_modes = 1; const int Nx = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( calculate_tensors), dim3(gridBlock),dim3(threadBlock), 0, 0, SR,fields,norms,num_modes,Nx); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( calculate_tensors), dim3(gridBlock),dim3(threadBlock), 0, 0, SR,fields,norms,num_modes,Nx); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( calculate_tensors), dim3(gridBlock),dim3(threadBlock), 0, 0, SR,fields,norms,num_modes,Nx); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
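// The .hip file above spells every launch as hipLaunchKernelGGL(...); its CUDA source (the next
// entry) uses the triple-chevron syntax. The two forms are equivalent spellings of the same
// launch; the kernel and its arguments below are placeholders for illustration only.
__global__ void scale_by_two(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * in[i];
}
// CUDA spelling:        scale_by_two<<<grid, block, sharedMemBytes, stream>>>(in, out, n);
// hipify output (HIP):  hipLaunchKernelGGL(scale_by_two, grid, block, sharedMemBytes, stream, in, out, n);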
a20b20436c3685864a73583f979c5b33a352657c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "calculate_tensors.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *SR = NULL; cudaMalloc(&SR, XSIZE*YSIZE); const float *fields = NULL; cudaMalloc(&fields, XSIZE*YSIZE); const float *norms = NULL; cudaMalloc(&norms, XSIZE*YSIZE); const int num_modes = 1; const int Nx = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); calculate_tensors<<<gridBlock,threadBlock>>>(SR,fields,norms,num_modes,Nx); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { calculate_tensors<<<gridBlock,threadBlock>>>(SR,fields,norms,num_modes,Nx); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { calculate_tensors<<<gridBlock,threadBlock>>>(SR,fields,norms,num_modes,Nx); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
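// Both versions of the harness above read the end timestamp immediately after the 1000-launch
// loop. Kernel launches are asynchronous, so that mostly measures enqueue overhead rather than
// GPU execution time; synchronizing before taking the end timestamp (or using cudaEvent timing)
// captures the work itself. Minimal sketch; dummy_kernel is a placeholder, not the benchmark's kernel.
#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

int main() {
    using namespace std::chrono;
    dummy_kernel<<<1, 1>>>();
    cudaDeviceSynchronize();                       // warm-up, then drain the queue
    auto start = steady_clock::now();
    for (int i = 0; i < 1000; ++i) dummy_kernel<<<1, 1>>>();
    cudaDeviceSynchronize();                       // wait for all queued launches to finish
    auto end = steady_clock::now();
    std::printf("%.1f us total\n", duration<double, std::micro>(end - start).count());
    return 0;
}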
b7f6325ef11f457adbfccd28e3dbbcf407793b0d.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "open3d/core/CUDAState.cuh" #include "open3d/core/CUDAUtils.h" #include "open3d/core/MemoryManager.h" namespace open3d { namespace core { void* CUDAMemoryManager::Malloc(size_t byte_size, const Device& device) { CUDAScopedDevice scoped_device(device); void* ptr; if (device.GetType() == Device::DeviceType::CUDA) { #if CUDART_VERSION >= 11020 OPEN3D_CUDA_CHECK(hipMallocAsync(static_cast<void**>(&ptr), byte_size, cuda::GetStream())); #else OPEN3D_CUDA_CHECK(hipMalloc(static_cast<void**>(&ptr), byte_size)); #endif } else { utility::LogError("CUDAMemoryManager::Malloc: Unimplemented device."); } return ptr; } void CUDAMemoryManager::Free(void* ptr, const Device& device) { CUDAScopedDevice scoped_device(device); if (device.GetType() == Device::DeviceType::CUDA) { if (ptr && IsCUDAPointer(ptr, device)) { #if CUDART_VERSION >= 11020 OPEN3D_CUDA_CHECK(hipFreeAsync(ptr, cuda::GetStream())); #else OPEN3D_CUDA_CHECK(hipFree(ptr)); #endif } } else { utility::LogError("CUDAMemoryManager::Free: Unimplemented device."); } } void CUDAMemoryManager::Memcpy(void* dst_ptr, const Device& dst_device, const void* src_ptr, const Device& src_device, size_t num_bytes) { if (dst_device.GetType() == Device::DeviceType::CUDA && src_device.GetType() == Device::DeviceType::CPU) { if (!IsCUDAPointer(dst_ptr, dst_device)) { utility::LogError("dst_ptr is not a CUDA pointer."); } CUDAScopedDevice scoped_device(dst_device); OPEN3D_CUDA_CHECK(hipMemcpyAsync(dst_ptr, src_ptr, num_bytes, hipMemcpyHostToDevice, cuda::GetStream())); } else if (dst_device.GetType() == Device::DeviceType::CPU && src_device.GetType() == Device::DeviceType::CUDA) { if (!IsCUDAPointer(src_ptr, src_device)) { utility::LogError("src_ptr is not a CUDA pointer."); } CUDAScopedDevice scoped_device(src_device); OPEN3D_CUDA_CHECK(hipMemcpyAsync(dst_ptr, src_ptr, num_bytes, hipMemcpyDeviceToHost, cuda::GetStream())); } else if (dst_device.GetType() == Device::DeviceType::CUDA && src_device.GetType() == Device::DeviceType::CUDA) { if 
(!IsCUDAPointer(dst_ptr, dst_device)) { utility::LogError("dst_ptr is not a CUDA pointer."); } if (!IsCUDAPointer(src_ptr, src_device)) { utility::LogError("src_ptr is not a CUDA pointer."); } if (dst_device == src_device) { CUDAScopedDevice scoped_device(src_device); OPEN3D_CUDA_CHECK(hipMemcpyAsync(dst_ptr, src_ptr, num_bytes, hipMemcpyDeviceToDevice, cuda::GetStream())); } else if (CUDAState::GetInstance()->IsP2PEnabled(src_device.GetID(), dst_device.GetID())) { OPEN3D_CUDA_CHECK(hipMemcpyPeerAsync( dst_ptr, dst_device.GetID(), src_ptr, src_device.GetID(), num_bytes, cuda::GetStream())); } else { void* cpu_buf = MemoryManager::Malloc(num_bytes, Device("CPU:0")); { CUDAScopedDevice scoped_device(src_device); OPEN3D_CUDA_CHECK(hipMemcpyAsync(cpu_buf, src_ptr, num_bytes, hipMemcpyDeviceToHost, cuda::GetStream())); } { CUDAScopedDevice scoped_device(dst_device); OPEN3D_CUDA_CHECK(hipMemcpyAsync(dst_ptr, cpu_buf, num_bytes, hipMemcpyHostToDevice, cuda::GetStream())); } MemoryManager::Free(cpu_buf, Device("CPU:0")); } } else { utility::LogError("Wrong hipMemcpyKind."); } } bool CUDAMemoryManager::IsCUDAPointer(const void* ptr, const Device& device) { CUDAScopedDevice scoped_device(device); hipPointerAttribute_t attributes; hipPointerGetAttributes(&attributes, ptr); return attributes.devicePointer != nullptr ? true : false; } } // namespace core } // namespace open3d
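// Sketch only: the device-to-device Memcpy path above calls hipMemcpyPeerAsync when
// CUDAState::GetInstance()->IsP2PEnabled(src, dst) allows it; that helper is not part of this
// excerpt. With the raw CUDA runtime API (the CUDA original of this file follows below), peer
// access between two devices is typically queried and enabled like this; the device ids are
// placeholders.
#include <cuda_runtime.h>

static bool enable_peer_access(int device, int peer_device) {
    int can_access = 0;
    if (cudaDeviceCanAccessPeer(&can_access, device, peer_device) != cudaSuccess || !can_access)
        return false;
    cudaSetDevice(device);
    cudaError_t err = cudaDeviceEnablePeerAccess(peer_device, 0 /* flags must be 0 */);
    // A repeated enable reports cudaErrorPeerAccessAlreadyEnabled; treat it as success.
    return err == cudaSuccess || err == cudaErrorPeerAccessAlreadyEnabled;
}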
b7f6325ef11f457adbfccd28e3dbbcf407793b0d.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include <cuda.h> #include <cuda_runtime.h> #include "open3d/core/CUDAState.cuh" #include "open3d/core/CUDAUtils.h" #include "open3d/core/MemoryManager.h" namespace open3d { namespace core { void* CUDAMemoryManager::Malloc(size_t byte_size, const Device& device) { CUDAScopedDevice scoped_device(device); void* ptr; if (device.GetType() == Device::DeviceType::CUDA) { #if CUDART_VERSION >= 11020 OPEN3D_CUDA_CHECK(cudaMallocAsync(static_cast<void**>(&ptr), byte_size, cuda::GetStream())); #else OPEN3D_CUDA_CHECK(cudaMalloc(static_cast<void**>(&ptr), byte_size)); #endif } else { utility::LogError("CUDAMemoryManager::Malloc: Unimplemented device."); } return ptr; } void CUDAMemoryManager::Free(void* ptr, const Device& device) { CUDAScopedDevice scoped_device(device); if (device.GetType() == Device::DeviceType::CUDA) { if (ptr && IsCUDAPointer(ptr, device)) { #if CUDART_VERSION >= 11020 OPEN3D_CUDA_CHECK(cudaFreeAsync(ptr, cuda::GetStream())); #else OPEN3D_CUDA_CHECK(cudaFree(ptr)); #endif } } else { utility::LogError("CUDAMemoryManager::Free: Unimplemented device."); } } void CUDAMemoryManager::Memcpy(void* dst_ptr, const Device& dst_device, const void* src_ptr, const Device& src_device, size_t num_bytes) { if (dst_device.GetType() == Device::DeviceType::CUDA && src_device.GetType() == Device::DeviceType::CPU) { if (!IsCUDAPointer(dst_ptr, dst_device)) { utility::LogError("dst_ptr is not a CUDA pointer."); } CUDAScopedDevice scoped_device(dst_device); OPEN3D_CUDA_CHECK(cudaMemcpyAsync(dst_ptr, src_ptr, num_bytes, cudaMemcpyHostToDevice, cuda::GetStream())); } else if (dst_device.GetType() == Device::DeviceType::CPU && src_device.GetType() == Device::DeviceType::CUDA) { if (!IsCUDAPointer(src_ptr, src_device)) { utility::LogError("src_ptr is not a CUDA pointer."); } CUDAScopedDevice scoped_device(src_device); OPEN3D_CUDA_CHECK(cudaMemcpyAsync(dst_ptr, src_ptr, num_bytes, cudaMemcpyDeviceToHost, cuda::GetStream())); } else if (dst_device.GetType() == Device::DeviceType::CUDA && src_device.GetType() == Device::DeviceType::CUDA) { if (!IsCUDAPointer(dst_ptr, dst_device)) { utility::LogError("dst_ptr is not a CUDA 
pointer."); } if (!IsCUDAPointer(src_ptr, src_device)) { utility::LogError("src_ptr is not a CUDA pointer."); } if (dst_device == src_device) { CUDAScopedDevice scoped_device(src_device); OPEN3D_CUDA_CHECK(cudaMemcpyAsync(dst_ptr, src_ptr, num_bytes, cudaMemcpyDeviceToDevice, cuda::GetStream())); } else if (CUDAState::GetInstance()->IsP2PEnabled(src_device.GetID(), dst_device.GetID())) { OPEN3D_CUDA_CHECK(cudaMemcpyPeerAsync( dst_ptr, dst_device.GetID(), src_ptr, src_device.GetID(), num_bytes, cuda::GetStream())); } else { void* cpu_buf = MemoryManager::Malloc(num_bytes, Device("CPU:0")); { CUDAScopedDevice scoped_device(src_device); OPEN3D_CUDA_CHECK(cudaMemcpyAsync(cpu_buf, src_ptr, num_bytes, cudaMemcpyDeviceToHost, cuda::GetStream())); } { CUDAScopedDevice scoped_device(dst_device); OPEN3D_CUDA_CHECK(cudaMemcpyAsync(dst_ptr, cpu_buf, num_bytes, cudaMemcpyHostToDevice, cuda::GetStream())); } MemoryManager::Free(cpu_buf, Device("CPU:0")); } } else { utility::LogError("Wrong cudaMemcpyKind."); } } bool CUDAMemoryManager::IsCUDAPointer(const void* ptr, const Device& device) { CUDAScopedDevice scoped_device(device); cudaPointerAttributes attributes; cudaPointerGetAttributes(&attributes, ptr); return attributes.devicePointer != nullptr ? true : false; } } // namespace core } // namespace open3d
8cbabe8e62b2ae708c0c0f75653980c796bf4609.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <opencv2/imgproc.hpp> #include <opencv2/imgcodecs.hpp> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cuda_device_runtime_api.h> #include <device_launch_parameters.h> #define BLUE_INDEX 0 #define RED_INDEX 1 #define GREEN_INDEX 2 void cudaCheckThrow(const char * file, unsigned line, const char *statement, hipError_t error) { std::string m_error_message; if(error == hipSuccess) { return; } throw std::invalid_argument(hipGetErrorString(error)); } #define CHECK_ERROR(value) cudaCheckThrow(__FILE__, __LINE__, #value, value) using namespace cv; struct BRG_IMAGE { uchar * b; uchar * r; uchar * g; }; class BRG { public: uchar b; uchar r; uchar g; BRG operator-(const BRG & val) { BRG output; output.r = std::abs((int)r - (int)val.r); output.g = std::abs((int)g - (int)val.g); output.b = std::abs((int)b - (int)val.b); return output; } float operator^(int exponent) { // std::cout << "^: " << (float) b << ", " << (float) r << ", " << (float) g << " = " << ::pow((float)b, exponent) + ::pow((float)r,exponent) + ::pow((float)g, exponent) << std::endl; return ::pow((float)b, exponent) + ::pow((float)r,exponent) + ::pow((float)g, exponent); } }; ::std::ostream & operator<<(::std::ostream & os, const BRG & c) { os << "[" << (int) c.b << " " << (int) c.r << " " << (int) c.g << "]"; return os; } __global__ void distanceCalc(BRG_IMAGE d_image, BRG * d_kMeans, uint cols, uint rows, float * output) { uint colId = threadIdx.y; uint rowId = threadIdx.x; uint K = blockIdx.z; if(colId < cols && rowId < rows) { BRG pixel; uint pixelId = rowId * cols + colId; pixel.g = d_image.g[pixelId]; pixel.r = d_image.r[pixelId]; pixel.b = d_image.b[pixelId]; output[K * cols * rows + pixelId] = pow(((float) pixel.b - (float) d_kMeans[K].b),2) + pow(((float) pixel.b - (float) d_kMeans[K].b),2) + pow(((float) pixel.b - (float) d_kMeans[K].b),2); } } __global__ void assigner(float * d_distanceBuffer, BRG_IMAGE d_image, BRG * d_kMeans, uint K, uint cols, uint rows, BRG_IMAGE d_output) { uint colId = threadIdx.y; uint rowId = threadIdx.x; if(colId < cols && rowId < rows) { uint pixelId = rowId * cols + colId; uint minId = 0; float min = d_distanceBuffer[pixelId]; for(int i = 1; i < K; i++) { float distanceVal = d_distanceBuffer[i * cols * rows + pixelId]; if(distanceVal < min) { min = distanceVal; minId = i; } } d_output.b[pixelId] = d_kMeans[minId].b; d_output.r[pixelId] = d_kMeans[minId].r; d_output.g[pixelId] = d_kMeans[minId].g; } } bool gpuAlgo(const BRG_IMAGE & d_image, BRG * & d_kMeans, uint K, uint cols, uint rows, BRG_IMAGE & d_output) { float * d_distanceBuffer; int length = cols * rows; CHECK_ERROR(hipMalloc(&d_distanceBuffer, sizeof(float) * cols * rows * K)); int blockXSize = 32; int blockYSize = 32; int gridXSize = (rows%blockXSize == 0) ? rows/blockXSize : rows / blockXSize + 1; int gridYSize = (cols%blockYSize == 0) ? 
cols/blockYSize : cols / blockYSize + 1; int gridZsize = K; dim3 blockSize(blockXSize, blockYSize, 1); dim3 gridSize(gridXSize, gridYSize, gridZsize); hipLaunchKernelGGL(( distanceCalc), dim3(gridSize), dim3(blockSize), 0, 0, d_image, d_kMeans, cols, rows, d_distanceBuffer); CHECK_ERROR(hipGetLastError()); dim3 blockSize2(blockXSize, blockYSize, 1); dim3 gridSize2(gridXSize, gridYSize, 1); hipLaunchKernelGGL(( assigner), dim3(gridSize2), dim3(blockSize2), 0, 0, d_distanceBuffer, d_image, d_kMeans, K, cols, rows, d_output); CHECK_ERROR(hipGetLastError()); } void gpuMeans(const BRG_IMAGE & image, uint cols, uint rows, uint * initialFeaturePos, uint K, BRG_IMAGE & output) { BRG_IMAGE d_image; BRG_IMAGE d_output; BRG * d_kMeans; BRG * h_kMeans = new BRG[K]; int length = cols * rows; CHECK_ERROR(hipMalloc(&d_image.g, sizeof(uchar) * length)); CHECK_ERROR(hipMalloc(&d_image.r, sizeof(uchar) * length)); CHECK_ERROR(hipMalloc(&d_image.b, sizeof(uchar) * length)); CHECK_ERROR(hipMalloc(&d_output.g, sizeof(uchar) * length)); CHECK_ERROR(hipMalloc(&d_output.r, sizeof(uchar) * length)); CHECK_ERROR(hipMalloc(&d_output.b, sizeof(uchar) * length)); CHECK_ERROR(hipMalloc(&d_kMeans, sizeof(BRG) * K)); CHECK_ERROR(hipMemcpy(d_image.g, image.g, sizeof(uchar) * length, hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_image.r, image.r, sizeof(uchar) * length, hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_image.b, image.b, sizeof(uchar) * length, hipMemcpyHostToDevice)); for(int i = 0; i < K ; i++) { uint initCol = initialFeaturePos[2 * i]; uint initRow = initialFeaturePos[2 * i + 1]; h_kMeans[i].b = image.b[initCol + initRow * cols]; h_kMeans[i].r = image.r[initCol + initRow * cols]; h_kMeans[i].g = image.g[initCol + initRow * cols]; } CHECK_ERROR(hipMemcpy(d_kMeans, h_kMeans, sizeof(BRG) * K, hipMemcpyHostToDevice)); gpuAlgo(d_image, d_kMeans, K, cols, rows, d_output); CHECK_ERROR(hipMemcpy(output.r, d_output.r, sizeof(uchar) * length, hipMemcpyDeviceToHost)); CHECK_ERROR(hipMemcpy(output.g, d_output.g, sizeof(uchar) * length, hipMemcpyDeviceToHost)); CHECK_ERROR(hipMemcpy(output.b, d_output.b, sizeof(uchar) * length, hipMemcpyDeviceToHost)); } void cpuKMeans(const BRG_IMAGE & image, uint cols, uint rows, uint * initialFeaturePos, uint K, BRG_IMAGE & output) { BRG * kMeans = new BRG[K]; memset(kMeans, 0, sizeof(BRG) * K); int * tempR = new int[K]; int * tempG = new int[K]; int * tempB = new int[K]; int * occur = new int[K]; for(int i = 0; i < K ; i++) { uint initCol = initialFeaturePos[2 * i]; uint initRow = initialFeaturePos[2 * i + 1]; tempB[i] = image.b[initCol + initRow * cols]; tempR[i] = image.r[initCol + initRow * cols]; tempG[i] = image.g[initCol + initRow * cols]; } for(int i = 0; i < K; i++) occur[i] = 1; // std::cout << "Image" << std::endl; int iterations = 1; while(true && iterations < 10) { bool isSteadyState = true; for(int i = 0; i < K ; i++) { if(kMeans[i].g != tempG[i]/occur[i] || kMeans[i].r != tempR[i]/occur[i] || kMeans[i].b != tempB[i]/occur[i]) { kMeans[i].g = tempG[i]/occur[i]; kMeans[i].r = tempR[i]/occur[i]; kMeans[i].b = tempB[i]/occur[i]; isSteadyState &= false; } else { isSteadyState &= true; } } if(isSteadyState) break; for(int c = 0; c < K; c++) { tempR[c]= 0; tempG[c]= 0; tempB[c]= 0; occur[c]= 0; } std::cout << "KMeans : " << std::endl; for(int i = 0; i< K ; i++) { std::cout << kMeans[i] << std::endl; } int length = cols * rows; for(int pixelId = 0; pixelId < length; pixelId++) { BRG pixel; pixel.r = image.r[pixelId]; pixel.b = image.b[pixelId]; pixel.g = image.g[pixelId]; 
std::vector<float> distances; for(int k = 0; k < K; k++) { BRG kVal = kMeans[k]; distances.push_back((pixel - kVal)^2); } int maxK = std::distance(distances.begin(), std::min_element(distances.begin(), distances.end())); if(maxK == 5) { std::cout << pixelId << ", " << pixel << ", "; for(const auto & it : distances) std::cout << it << ", "; std::cout << std::endl; } tempR[maxK] += pixel.r; tempG[maxK] += pixel.g; tempB[maxK] += pixel.b; occur[maxK]++; output.b[pixelId] = kMeans[maxK].b; output.r[pixelId] = kMeans[maxK].r; output.g[pixelId] = kMeans[maxK].g; } iterations++; } /*if(pixelId == 0) { std::cout << kMeans[maxK] << ", ("; for(int r =0; r < K;r++) { std::cout << kMeans[r] << " - " << ((pixel - kMeans[r])^2) << ", "; } std::cout << " )" << std::endl; } */ } bool processImage(const BRG_IMAGE & image, uint cols, uint rows, BRG_IMAGE & outputImage) { //uint initialFeaturePos[] = {647, 793, 1661,1019, 362,939}; uint initialFeaturePos[] = {647, 793, 1661,1019, 362,939}; //uint initialFeaturePos[] = {2 ,1, 0, 1}; uint K = 3; if((sizeof(initialFeaturePos)/sizeof(uint))/(K*2) != 1 || (sizeof(initialFeaturePos)/sizeof(uint))%(K*2) != 0) { std::cerr << "Mismatch in K and intial features, K : " << K << ", initial feature count : " << ((float)sizeof(initialFeaturePos))/sizeof(uint)/(K*2) << std::endl; return false; } for(int i = 0; i< K ; i++) { if(initialFeaturePos[i * 2] >= cols || initialFeaturePos[i * 2 + 1] >= rows) { std::cerr << "initial Positions out of bound, initial positions : (" << initialFeaturePos[i * 2] << ", " << initialFeaturePos[i * 2 + 1] << ") , size : (" << cols << ", " << rows << ")" << std::endl; return false; } } uint length = cols * rows; BRG_IMAGE cpuOut; cpuOut.g = new uchar[length]; cpuOut.b = new uchar[length]; cpuOut.r = new uchar[length]; memset(cpuOut.g, 255, length * sizeof(uchar)); memset(cpuOut.r, 255, length * sizeof(uchar)); memset(cpuOut.b, 255, length * sizeof(uchar)); //cpuKMeans(image, cols, rows, initialFeaturePos, K, cpuOut); gpuMeans(image, cols, rows, initialFeaturePos, K, cpuOut); outputImage = cpuOut; return true; } int main() { try { Mat src = imread("/x01/bhashithaa/image/src/circles.jpg", IMREAD_COLOR); Mat matbrg[3]; if(src.empty()) { std::cerr << "Cannot load image" << std::endl; return 1; } else { std::cout << "Image loaded with Cols : " << src.cols << ", " << src.rows << std::endl; } split(src, matbrg); BRG_IMAGE brg; brg.b = matbrg[BLUE_INDEX].data; brg.r = matbrg[RED_INDEX].data; brg.g = matbrg[GREEN_INDEX].data; uint cols = src.cols; uint rows = src.rows; BRG_IMAGE outputImage; if(!processImage(brg, cols, rows, outputImage)) return -1; //std::cout << (int)outputImage.b[0] << ", " << (int)outputImage.r[0] << ", " << (int) *outputImage.g << std::endl; Mat outputIM[3]; outputIM[0] = Mat(rows, cols, CV_8UC1, outputImage.b); outputIM[1] = Mat(rows, cols, CV_8UC1, outputImage.r); outputIM[2] = Mat(rows, cols, CV_8UC1, outputImage.g); Mat outputComb; merge(outputIM,3, outputComb); cv::imwrite("OutputKoala.jpg", outputComb); } catch(std::exception &ex) { std::cerr << "Exception thrown : " << ex.what() << std::endl; } }
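// Sketch of a corrected distance kernel, not the file's code: distanceCalc (in the .hip version
// above and in the .cu original that follows) builds colId/rowId from threadIdx alone, so every
// block recomputes the same 32x32 tile and the rest of the image is never written, and the
// squared distance adds the blue-channel term three times instead of using b, r and g. Keeping
// the file's row-on-x / col-on-y launch convention, a fixed kernel would look like this; the
// structs below only mirror the file's data layout so the sketch stands alone.
struct BRG { unsigned char b, r, g; };
struct BRG_IMAGE { unsigned char *b, *r, *g; };

__global__ void distanceCalcFixed(BRG_IMAGE img, const BRG* kMeans,
                                  unsigned int cols, unsigned int rows, float* out) {
    unsigned int rowId = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int colId = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int k     = blockIdx.z;                       // one grid layer per cluster centre
    if (colId < cols && rowId < rows) {
        unsigned int idx = rowId * cols + colId;
        float db = (float)img.b[idx] - (float)kMeans[k].b;
        float dr = (float)img.r[idx] - (float)kMeans[k].r;
        float dg = (float)img.g[idx] - (float)kMeans[k].g;
        out[k * cols * rows + idx] = db * db + dr * dr + dg * dg;
    }
}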
8cbabe8e62b2ae708c0c0f75653980c796bf4609.cu
#include <iostream> #include <opencv2/imgproc.hpp> #include <opencv2/imgcodecs.hpp> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <cuda_device_runtime_api.h> #include <device_launch_parameters.h> #define BLUE_INDEX 0 #define RED_INDEX 1 #define GREEN_INDEX 2 void cudaCheckThrow(const char * file, unsigned line, const char *statement, cudaError_t error) { std::string m_error_message; if(error == cudaSuccess) { return; } throw std::invalid_argument(cudaGetErrorString(error)); } #define CHECK_ERROR(value) cudaCheckThrow(__FILE__, __LINE__, #value, value) using namespace cv; struct BRG_IMAGE { uchar * b; uchar * r; uchar * g; }; class BRG { public: uchar b; uchar r; uchar g; BRG operator-(const BRG & val) { BRG output; output.r = std::abs((int)r - (int)val.r); output.g = std::abs((int)g - (int)val.g); output.b = std::abs((int)b - (int)val.b); return output; } float operator^(int exponent) { // std::cout << "^: " << (float) b << ", " << (float) r << ", " << (float) g << " = " << std::pow((float)b, exponent) + std::pow((float)r,exponent) + std::pow((float)g, exponent) << std::endl; return std::pow((float)b, exponent) + std::pow((float)r,exponent) + std::pow((float)g, exponent); } }; ::std::ostream & operator<<(::std::ostream & os, const BRG & c) { os << "[" << (int) c.b << " " << (int) c.r << " " << (int) c.g << "]"; return os; } __global__ void distanceCalc(BRG_IMAGE d_image, BRG * d_kMeans, uint cols, uint rows, float * output) { uint colId = threadIdx.y; uint rowId = threadIdx.x; uint K = blockIdx.z; if(colId < cols && rowId < rows) { BRG pixel; uint pixelId = rowId * cols + colId; pixel.g = d_image.g[pixelId]; pixel.r = d_image.r[pixelId]; pixel.b = d_image.b[pixelId]; output[K * cols * rows + pixelId] = pow(((float) pixel.b - (float) d_kMeans[K].b),2) + pow(((float) pixel.b - (float) d_kMeans[K].b),2) + pow(((float) pixel.b - (float) d_kMeans[K].b),2); } } __global__ void assigner(float * d_distanceBuffer, BRG_IMAGE d_image, BRG * d_kMeans, uint K, uint cols, uint rows, BRG_IMAGE d_output) { uint colId = threadIdx.y; uint rowId = threadIdx.x; if(colId < cols && rowId < rows) { uint pixelId = rowId * cols + colId; uint minId = 0; float min = d_distanceBuffer[pixelId]; for(int i = 1; i < K; i++) { float distanceVal = d_distanceBuffer[i * cols * rows + pixelId]; if(distanceVal < min) { min = distanceVal; minId = i; } } d_output.b[pixelId] = d_kMeans[minId].b; d_output.r[pixelId] = d_kMeans[minId].r; d_output.g[pixelId] = d_kMeans[minId].g; } } bool gpuAlgo(const BRG_IMAGE & d_image, BRG * & d_kMeans, uint K, uint cols, uint rows, BRG_IMAGE & d_output) { float * d_distanceBuffer; int length = cols * rows; CHECK_ERROR(cudaMalloc(&d_distanceBuffer, sizeof(float) * cols * rows * K)); int blockXSize = 32; int blockYSize = 32; int gridXSize = (rows%blockXSize == 0) ? rows/blockXSize : rows / blockXSize + 1; int gridYSize = (cols%blockYSize == 0) ? 
cols/blockYSize : cols / blockYSize + 1; int gridZsize = K; dim3 blockSize(blockXSize, blockYSize, 1); dim3 gridSize(gridXSize, gridYSize, gridZsize); distanceCalc<<<gridSize, blockSize>>>(d_image, d_kMeans, cols, rows, d_distanceBuffer); CHECK_ERROR(cudaGetLastError()); dim3 blockSize2(blockXSize, blockYSize, 1); dim3 gridSize2(gridXSize, gridYSize, 1); assigner<<<gridSize2, blockSize2>>>(d_distanceBuffer, d_image, d_kMeans, K, cols, rows, d_output); CHECK_ERROR(cudaGetLastError()); } void gpuMeans(const BRG_IMAGE & image, uint cols, uint rows, uint * initialFeaturePos, uint K, BRG_IMAGE & output) { BRG_IMAGE d_image; BRG_IMAGE d_output; BRG * d_kMeans; BRG * h_kMeans = new BRG[K]; int length = cols * rows; CHECK_ERROR(cudaMalloc(&d_image.g, sizeof(uchar) * length)); CHECK_ERROR(cudaMalloc(&d_image.r, sizeof(uchar) * length)); CHECK_ERROR(cudaMalloc(&d_image.b, sizeof(uchar) * length)); CHECK_ERROR(cudaMalloc(&d_output.g, sizeof(uchar) * length)); CHECK_ERROR(cudaMalloc(&d_output.r, sizeof(uchar) * length)); CHECK_ERROR(cudaMalloc(&d_output.b, sizeof(uchar) * length)); CHECK_ERROR(cudaMalloc(&d_kMeans, sizeof(BRG) * K)); CHECK_ERROR(cudaMemcpy(d_image.g, image.g, sizeof(uchar) * length, cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_image.r, image.r, sizeof(uchar) * length, cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_image.b, image.b, sizeof(uchar) * length, cudaMemcpyHostToDevice)); for(int i = 0; i < K ; i++) { uint initCol = initialFeaturePos[2 * i]; uint initRow = initialFeaturePos[2 * i + 1]; h_kMeans[i].b = image.b[initCol + initRow * cols]; h_kMeans[i].r = image.r[initCol + initRow * cols]; h_kMeans[i].g = image.g[initCol + initRow * cols]; } CHECK_ERROR(cudaMemcpy(d_kMeans, h_kMeans, sizeof(BRG) * K, cudaMemcpyHostToDevice)); gpuAlgo(d_image, d_kMeans, K, cols, rows, d_output); CHECK_ERROR(cudaMemcpy(output.r, d_output.r, sizeof(uchar) * length, cudaMemcpyDeviceToHost)); CHECK_ERROR(cudaMemcpy(output.g, d_output.g, sizeof(uchar) * length, cudaMemcpyDeviceToHost)); CHECK_ERROR(cudaMemcpy(output.b, d_output.b, sizeof(uchar) * length, cudaMemcpyDeviceToHost)); } void cpuKMeans(const BRG_IMAGE & image, uint cols, uint rows, uint * initialFeaturePos, uint K, BRG_IMAGE & output) { BRG * kMeans = new BRG[K]; memset(kMeans, 0, sizeof(BRG) * K); int * tempR = new int[K]; int * tempG = new int[K]; int * tempB = new int[K]; int * occur = new int[K]; for(int i = 0; i < K ; i++) { uint initCol = initialFeaturePos[2 * i]; uint initRow = initialFeaturePos[2 * i + 1]; tempB[i] = image.b[initCol + initRow * cols]; tempR[i] = image.r[initCol + initRow * cols]; tempG[i] = image.g[initCol + initRow * cols]; } for(int i = 0; i < K; i++) occur[i] = 1; // std::cout << "Image" << std::endl; int iterations = 1; while(true && iterations < 10) { bool isSteadyState = true; for(int i = 0; i < K ; i++) { if(kMeans[i].g != tempG[i]/occur[i] || kMeans[i].r != tempR[i]/occur[i] || kMeans[i].b != tempB[i]/occur[i]) { kMeans[i].g = tempG[i]/occur[i]; kMeans[i].r = tempR[i]/occur[i]; kMeans[i].b = tempB[i]/occur[i]; isSteadyState &= false; } else { isSteadyState &= true; } } if(isSteadyState) break; for(int c = 0; c < K; c++) { tempR[c]= 0; tempG[c]= 0; tempB[c]= 0; occur[c]= 0; } std::cout << "KMeans : " << std::endl; for(int i = 0; i< K ; i++) { std::cout << kMeans[i] << std::endl; } int length = cols * rows; for(int pixelId = 0; pixelId < length; pixelId++) { BRG pixel; pixel.r = image.r[pixelId]; pixel.b = image.b[pixelId]; pixel.g = image.g[pixelId]; std::vector<float> distances; for(int k = 0; k < 
K; k++) { BRG kVal = kMeans[k]; distances.push_back((pixel - kVal)^2); } int maxK = std::distance(distances.begin(), std::min_element(distances.begin(), distances.end())); if(maxK == 5) { std::cout << pixelId << ", " << pixel << ", "; for(const auto & it : distances) std::cout << it << ", "; std::cout << std::endl; } tempR[maxK] += pixel.r; tempG[maxK] += pixel.g; tempB[maxK] += pixel.b; occur[maxK]++; output.b[pixelId] = kMeans[maxK].b; output.r[pixelId] = kMeans[maxK].r; output.g[pixelId] = kMeans[maxK].g; } iterations++; } /*if(pixelId == 0) { std::cout << kMeans[maxK] << ", ("; for(int r =0; r < K;r++) { std::cout << kMeans[r] << " - " << ((pixel - kMeans[r])^2) << ", "; } std::cout << " )" << std::endl; } */ } bool processImage(const BRG_IMAGE & image, uint cols, uint rows, BRG_IMAGE & outputImage) { //uint initialFeaturePos[] = {647, 793, 1661,1019, 362,939}; uint initialFeaturePos[] = {647, 793, 1661,1019, 362,939}; //uint initialFeaturePos[] = {2 ,1, 0, 1}; uint K = 3; if((sizeof(initialFeaturePos)/sizeof(uint))/(K*2) != 1 || (sizeof(initialFeaturePos)/sizeof(uint))%(K*2) != 0) { std::cerr << "Mismatch in K and intial features, K : " << K << ", initial feature count : " << ((float)sizeof(initialFeaturePos))/sizeof(uint)/(K*2) << std::endl; return false; } for(int i = 0; i< K ; i++) { if(initialFeaturePos[i * 2] >= cols || initialFeaturePos[i * 2 + 1] >= rows) { std::cerr << "initial Positions out of bound, initial positions : (" << initialFeaturePos[i * 2] << ", " << initialFeaturePos[i * 2 + 1] << ") , size : (" << cols << ", " << rows << ")" << std::endl; return false; } } uint length = cols * rows; BRG_IMAGE cpuOut; cpuOut.g = new uchar[length]; cpuOut.b = new uchar[length]; cpuOut.r = new uchar[length]; memset(cpuOut.g, 255, length * sizeof(uchar)); memset(cpuOut.r, 255, length * sizeof(uchar)); memset(cpuOut.b, 255, length * sizeof(uchar)); //cpuKMeans(image, cols, rows, initialFeaturePos, K, cpuOut); gpuMeans(image, cols, rows, initialFeaturePos, K, cpuOut); outputImage = cpuOut; return true; } int main() { try { Mat src = imread("/x01/bhashithaa/image/src/circles.jpg", IMREAD_COLOR); Mat matbrg[3]; if(src.empty()) { std::cerr << "Cannot load image" << std::endl; return 1; } else { std::cout << "Image loaded with Cols : " << src.cols << ", " << src.rows << std::endl; } split(src, matbrg); BRG_IMAGE brg; brg.b = matbrg[BLUE_INDEX].data; brg.r = matbrg[RED_INDEX].data; brg.g = matbrg[GREEN_INDEX].data; uint cols = src.cols; uint rows = src.rows; BRG_IMAGE outputImage; if(!processImage(brg, cols, rows, outputImage)) return -1; //std::cout << (int)outputImage.b[0] << ", " << (int)outputImage.r[0] << ", " << (int) *outputImage.g << std::endl; Mat outputIM[3]; outputIM[0] = Mat(rows, cols, CV_8UC1, outputImage.b); outputIM[1] = Mat(rows, cols, CV_8UC1, outputImage.r); outputIM[2] = Mat(rows, cols, CV_8UC1, outputImage.g); Mat outputComb; merge(outputIM,3, outputComb); cv::imwrite("OutputKoala.jpg", outputComb); } catch(std::exception &ex) { std::cerr << "Exception thrown : " << ex.what() << std::endl; } }
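// Companion sketch to the corrected distance kernel above: assigner uses the same threadIdx-only
// indexing, so the argmin over the K distance planes also only runs for the first block's tile.
// With global indices the assignment step becomes the kernel below; BRG and BRG_IMAGE are
// redeclared here only so the sketch stands alone.
struct BRG { unsigned char b, r, g; };
struct BRG_IMAGE { unsigned char *b, *r, *g; };

__global__ void assignerFixed(const float* dist, const BRG* kMeans,
                              unsigned int K, unsigned int cols, unsigned int rows,
                              BRG_IMAGE out) {
    unsigned int rowId = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int colId = blockIdx.y * blockDim.y + threadIdx.y;
    if (colId < cols && rowId < rows) {
        unsigned int idx = rowId * cols + colId;
        unsigned int best = 0;
        float bestDist = dist[idx];
        for (unsigned int k = 1; k < K; ++k) {             // argmin over the K distance planes
            float d = dist[k * cols * rows + idx];
            if (d < bestDist) { bestDist = d; best = k; }
        }
        out.b[idx] = kMeans[best].b;
        out.r[idx] = kMeans[best].r;
        out.g[idx] = kMeans[best].g;
    }
}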
43dd0e4b7c1fa1c9bb7f4a61044a1feb6ac80ee1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hipfft.h" #include <stdio.h> #include "CudaHelpLib.h" static int isCudaOk = 0; static _CUDA_DEV_INFO deviceinfo; ///dev constant define __constant__ int devC_cols; __constant__ int devC_rows; __constant__ int devC_divc; __constant__ int devC_x; __constant__ int devC_y; __constant__ float devC_f1; __constant__ float devC_xe; __constant__ float devC_ye; __constant__ float devC_ze; __constant__ unsigned int devC_Palette[512]; //float guassianTable[512]; //////////////////// static int *dev_temp_4M1 = 0; static int *dev_temp_4M2 = 0; static int *dev_temp_4M3 = 0; static unsigned char *dev_background_4M = 0; static unsigned char *dev_cuboid = 0; //////////////////////////////// //>>>>>>>>>>>>>>>>share lib main func #ifndef Q_OS_LINUX #include "Windows.h" BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved ) { switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: break; case DLL_THREAD_ATTACH: break; case DLL_THREAD_DETACH: break; case DLL_PROCESS_DETACH: CuH_FreeTempCudaMem(); break; } return TRUE; } #endif //<<<<<<<<<<<<< ////////////////////////////////////////////////////////////////////////// // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>cuda kernel function ////////////////// ////////////////////////////////////////////////////////////////////// __global__ void magnitude32F_Kernel(FFT_Complex * datain, FFT_Real * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; float d = datain[index].re*datain[index].re + datain[index].im*datain[index].im; d = sqrtf(d); dataout[index] = d; } } __global__ void logAddBeta32F_Kernel(FFT_Real * datain, FFT_Real * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; float beta = *((float*)(&devC_y)); float d = datain[index]; d = d + beta; d = logf(d); dataout[index] = d; } } __global__ void cvtAndScale32Fto16U_Kernel(FFT_Real * datain, unsigned short * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; float d = *((float*)(&devC_x)); float beta = *((float*)(&devC_y)); d = d*datain[index] + beta; d = (d >= 0 && d <= 65535.0f)*d + (d > 65535.0f)*65535.0f; dataout[index] = d; } } __global__ void cpyRealToComplex_Kernel(FFT_Real * datain, FFT_Complex * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; dataout[index].re = datain[index]; dataout[index].im = 0; } } __global__ void cpy16UC1ToComplex_Kernel(unsigned short * datain, FFT_Complex * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; dataout[index].re = datain[index]; dataout[index].im = 0; } } __global__ void ROI_Complex_Kernel(FFT_Complex * datain, FFT_Complex *dataout) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { 
dataout[j*devC_cols + i].re = datain[(j + devC_y)*devC_divc + i + devC_x].re; dataout[j*devC_cols + i].im = datain[(j + devC_y)*devC_divc + i + devC_x].im; } } __global__ void transposeComplex_Kernel(FFT_Complex * datain, FFT_Complex *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; //unsigned char data; if (i<devC_cols && j<devC_rows) { dataout[i*devC_rows + j].re = datain[j*devC_cols + i].re; dataout[i*devC_rows + j].im = datain[j*devC_cols + i].im; } } __global__ void transpose32FC1_Kernel(float * datain, float *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; //unsigned char data; if (i<devC_cols && j<devC_rows) { dataout[i*devC_rows + j] = datain[j*devC_cols + i]; } } __global__ void transpose16UC1_Kernel(unsigned short * datain, unsigned short *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; //unsigned char data; if (i<devC_cols && j<devC_rows) { dataout[i*devC_rows + j] = datain[j*devC_cols + i]; } } __global__ void calcWinAndDispersion(FFT_Complex *data, FFT_Real *wind, FFT_Complex *dispersion) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; FFT_Real d1 = data[index].re*dispersion[i].re - data[index].im*dispersion[i].im; FFT_Real d2 = data[index].im*dispersion[i].re + data[index].re*dispersion[i].im; d1 *= wind[i]; d2 *= wind[i]; data[index].re = d1; data[index].im = d2; } } __global__ void power8UC1_Kernel(unsigned char * datain, unsigned char * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { float d = datain[j*devC_cols + i]; d = powf(d, devC_f1); if (d>255.0f) d = 255.0f; dataout[j*devC_cols + i] = d; } } __global__ void pixWindow16UC1To8UC1_Kernel(unsigned short * datain, unsigned char * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; float s_v = devC_y; float delta = devC_y; s_v /= 2.0f; s_v = devC_x - s_v; delta = 256.0f / delta; if (i<devC_cols && j<devC_rows) { float d = datain[j*devC_cols + i]; d = d - s_v; d = d*delta; d = d*(d>0.0f); d = d*(d <= 255.0f); //+255.0*(d>255.0f) dataout[j*devC_cols + i] = d; } } __global__ void allPixAvg_Kernel(unsigned short *datain , float *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int index = j * 256 + i; float sum = 0; if (index < devC_cols) { for (int k = 0; k < devC_rows; k++) { sum = sum + datain[k*devC_cols + index]; if (k > 0) sum /= 2.0f; } dataout[index] = sum; } } __global__ void threshold16UC1_Kernel(unsigned short * datain, unsigned short * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { int th = 0; th = (datain[j*devC_cols + i] >= devC_x); if (devC_divc & 0x0080) th = ~th; if ((devC_divc & 0x000F) == 0) { dataout[j*devC_cols + i] = th * 65535; } else if ((devC_divc & 0x000F) == 1) { dataout[j*devC_cols + i] = th*datain[j*devC_cols + i]; } } } __global__ void zeroLeftComplexAndDivConst_Kernel(FFT_Complex * data) { const unsigned int 
i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i < devC_cols && j < devC_rows) { const int index = j*devC_cols + i; data[index].re = (data[index].re / devC_f1)*(i >= (devC_cols/2)); data[index].im = (data[index].im / devC_f1)*(i >= (devC_cols/2)); } } __global__ void zeroComplexReOrIm_Kernel(FFT_Complex * data) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i < devC_cols && j < devC_rows) { const int index = j*devC_cols + i; data[index].re = (data[index].re )*(devC_divc == 0); data[index].im = (data[index].im)*(devC_divc == 1); } } __global__ void flipH8UC1_Kernel(unsigned char * datain, unsigned char * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { dataout[j*devC_cols + i] = datain[(devC_rows - 1 - j)*devC_cols + i]; } } ////////////////////////////////////////////////////////////////////////// // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<cuda kernel function ////////////////// ////////////////////////////////////////////////////////////////////// void CUDACALLMODE setIsCudaAccLibOK(int isok) { isCudaOk = isok; } int CUDACALLMODE getIsCudaAccLibOK(void) { return isCudaOk; } _CUDA_DEV_INFO* CUDACALLMODE getCudaDeviceInfo(int id) { hipFree(0); if (id >= 0 || id <= 4) { hipError_t res; hipDeviceProp_t device_prop; res = hipSetDevice(id); if (res != hipSuccess) { fprintf(stderr, "invaild cuda id!"); return &deviceinfo; } device_prop.name[0] = 0; hipGetDeviceProperties(&device_prop, id); sprintf(deviceinfo.name, "%s", device_prop.name); deviceinfo.major = device_prop.major; deviceinfo.minor = device_prop.minor; deviceinfo.multiProcessorCount = device_prop.multiProcessorCount; deviceinfo.deviceOverlap = device_prop.deviceOverlap; } else { deviceinfo.name[0] = 0; } return &deviceinfo; } int getCudaDeviceCount(void) { int device_count; hipGetDeviceCount(&device_count); return device_count; } int setCudaDevTo(int id) { hipError_t cudaStatus = hipSuccess; cudaStatus = hipSetDevice(id); if (cudaStatus == hipSuccess) { return 1; } else { return 0; } } void CUDACALLMODE cudaNop(void) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0) { cudaStatus = hipMalloc((void**)&dev_temp_4M1, 1024 * 1024 * 32); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } } if (dev_temp_4M2 == 0) { cudaStatus = hipMalloc((void**)&dev_temp_4M2, 1024 * 1024 * 64); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } } if (dev_temp_4M3 == 0) { cudaStatus = hipMalloc((void**)&dev_temp_4M3, 1024 * 1024 * 32); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } } if (dev_background_4M == 0) { cudaStatus = hipMalloc((void**)&dev_background_4M, 1024 * 1024 * 32); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } } } void CuH_FreeTempCudaMem(void) { if (dev_temp_4M1 != 0) { hipFree(dev_temp_4M1); dev_temp_4M1 = 0; } if (dev_temp_4M2 != 0) { hipFree(dev_temp_4M2); dev_temp_4M2 = 0; } if (dev_temp_4M3 != 0) { hipFree(dev_temp_4M2); dev_temp_4M3 = 0; } if (dev_background_4M != 0) { hipFree(dev_background_4M); dev_background_4M = 0; } if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error!\n"); } if (hipDeviceReset() != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!\n"); } } int cudaDevSync(void) { hipError_t cudaStatus = hipSuccess; cudaStatus = 
hipDeviceSynchronize(); if (cudaStatus == hipSuccess) { return 1; } else { return 0; } } int allocateFFTComplex(FFT_Complex** ptr, size_t size) { if (size < 1) { *ptr = nullptr; return 0; } hipError_t cudaStatus = hipSuccess; FFT_Complex *res = nullptr; cudaStatus = hipMalloc((void **)&res, size); if (cudaStatus == hipSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int allocateFFTComplexExt(FFT_Complex** ptr, int cols, int rows) { if (cols < 0) { *ptr = nullptr; return 0; } if (rows < 0) rows = 1; size_t size = static_cast<size_t>(cols)*static_cast<size_t>(rows)*sizeof(FFT_Complex); hipError_t cudaStatus = hipSuccess; FFT_Complex *res = nullptr; cudaStatus = hipMalloc((void **)&res, size); if (cudaStatus == hipSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int allocateFFTReal(FFT_Real** ptr, size_t size) { if (size < 1) { *ptr = nullptr; return 0; } hipError_t cudaStatus = hipSuccess; FFT_Real *res = nullptr; cudaStatus = hipMalloc((void **)&res, size); if (cudaStatus == hipSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int allocateFFTRealExt(FFT_Real** ptr, int cols, int rows) { if (cols < 0) { *ptr = nullptr; return 0; } if (rows < 0) rows = 1; size_t size = static_cast<size_t>(cols)*static_cast<size_t>(rows)*sizeof(FFT_Real); hipError_t cudaStatus = hipSuccess; FFT_Real *res = nullptr; cudaStatus = hipMalloc((void **)&res, size); if (cudaStatus == hipSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int freeCudaMem(void *ptr) { hipError_t cudaStatus = hipSuccess; cudaStatus = hipFree(ptr); if (cudaStatus == hipSuccess) { return 1; } else { return 0; } } int cudaMemFromHost(void *dstDev, void *srcHost, size_t byteSize) { if (byteSize < 1) { return 0; } hipError_t cudaStatus = hipSuccess; cudaStatus = hipMemcpy(dstDev, srcHost, byteSize, hipMemcpyHostToDevice); if (cudaStatus == hipSuccess) { return 1; } else { return 0; } } int cudaMemToHost(void *dstHost, void *srcDev, size_t byteSize) { if (byteSize < 1) { return 0; } hipError_t cudaStatus = hipSuccess; cudaStatus = hipMemcpy(dstHost, srcDev, byteSize, hipMemcpyDeviceToHost); if (cudaStatus == hipSuccess) { return 1; } else { return 0; } } int cudaMemDevToDev(void *dstDev, void *srcDev, size_t byteSize) { if (byteSize < 1) { return 0; } hipError_t cudaStatus = hipSuccess; cudaStatus = hipMemcpy(dstDev, srcDev, byteSize, hipMemcpyDeviceToDevice); if (cudaStatus == hipSuccess) { return 1; } else { return 0; } } int destroyFFTPlan(FFTPlan_Handle plan) { hipfftResult cudaStatus = HIPFFT_SUCCESS; cudaStatus = hipfftDestroy(plan); if (cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int createFFTPlan1d_R2C(FFTPlan_Handle *plan, int cols, int rows) { if (cols < 0) { *plan = 0; return 0; } if (rows < 0) rows = 1; hipfftResult cudaStatus = HIPFFT_SUCCESS; cudaStatus = hipfftPlan1d(plan, cols, HIPFFT_R2C, rows); if (cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int createFFTPlan1d_C2C(FFTPlan_Handle *plan, int cols, int rows) { if (cols < 0) { *plan = 0; return 0; } if (rows < 0) rows = 1; hipfftResult cudaStatus = HIPFFT_SUCCESS; cudaStatus = hipfftPlan1d(plan, cols, HIPFFT_C2C, rows); if (cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int createFFTPlan1d_C2R(FFTPlan_Handle *plan, int cols, int rows) { if (cols < 0) { *plan = 0; return 0; } if (rows < 0) rows = 1; hipfftResult cudaStatus = HIPFFT_SUCCESS; cudaStatus = hipfftPlan1d(plan, cols, HIPFFT_C2R, rows); if 
(cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int execR2CfftPlan(FFTPlan_Handle plan, FFT_Real *idata, FFT_Complex *odata) { hipfftResult cudaStatus = HIPFFT_SUCCESS; cudaStatus = hipfftExecR2C(plan, idata, (hipfftComplex*)odata); if (cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int execC2CfftPlan(FFTPlan_Handle plan, FFT_Complex *idata, FFT_Complex *odata ,int dir) { hipfftResult cudaStatus = HIPFFT_SUCCESS; int d = HIPFFT_FORWARD; if (dir) { d = HIPFFT_BACKWARD; } cudaStatus = hipfftExecC2C(plan, (hipfftComplex*)idata, (hipfftComplex*)odata, d); if (cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int execC2RfftPlan(FFTPlan_Handle plan, FFT_Complex *idata, FFT_Real *odata) { hipfftResult cudaStatus = HIPFFT_SUCCESS; cudaStatus = hipfftExecC2R(plan,(hipfftComplex *) idata, (hipfftReal*)odata); if (cudaStatus == HIPFFT_SUCCESS) { return 1; } else { return 0; } } int CuH_downloadTemp4M2(int size, unsigned char* host_dst) { hipError_t cudaStatus = hipSuccess; cudaStatus = hipMemcpy((void*)host_dst, dev_temp_4M2, size, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } return 0; } int CuH_uploadTemp4M2(int size, unsigned char* host_dst) { hipError_t cudaStatus = hipSuccess; cudaStatus = hipMemcpy(dev_temp_4M2, (void*)host_dst, size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } return 0; } int CuH_magnitudeDevC2R(FFT_Complex *devSrc, int cols, int rows, FFT_Real *hostDst) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } FFT_Complex *srcPtr = 0; if (devSrc) { srcPtr = devSrc; } else { srcPtr = (FFT_Complex*)dev_temp_4M2; } hipLaunchKernelGGL(( magnitude32F_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, srcPtr, (FFT_Real*)dev_temp_4M1); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "magnitude32F_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "CuH_magnitudeDevC2R returned error code %d after launching magnitude32F_Kernel!\n", cudaStatus); return 1; } if (hostDst) { cudaStatus = hipMemcpy(hostDst, dev_temp_4M1, rows*cols*sizeof(FFT_Real), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } else { cudaStatus = hipMemcpy(dev_temp_4M2, dev_temp_4M1, rows*cols*sizeof(FFT_Real), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } return 0; } int CuH_logDevR2R(FFT_Real *devSrc, int cols, int rows, float beta, FFT_Real *hostDst) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, 
sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_y, &beta, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } FFT_Real *srcPtr = 0; if (devSrc) { srcPtr = devSrc; } else { srcPtr = (FFT_Real*)dev_temp_4M1; cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(FFT_Real), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } hipLaunchKernelGGL(( logAddBeta32F_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, srcPtr, (FFT_Real*)dev_temp_4M2); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "logAddBeta32F_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "CuH_logDevR2R returned error code %d after launching logAddBeta32F_Kernel!\n", cudaStatus); return 1; } if (hostDst) { cudaStatus = hipMemcpy(hostDst, dev_temp_4M2, rows*cols*sizeof(FFT_Real), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } return 0; } int CuH_cvtDevRealTo16UC1(FFT_Real *devSrc, int cols, int rows, float alpha, float beta, unsigned short *hostDst) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_x, &alpha, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_y, &beta, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } FFT_Real *srcPtr = 0; if (devSrc) { srcPtr = devSrc; } else { srcPtr = (FFT_Real*)dev_temp_4M1; cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(FFT_Real), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } hipLaunchKernelGGL(( cvtAndScale32Fto16U_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, srcPtr, (unsigned short*)dev_temp_4M2); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "cvtAndScale32Fto16U_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "CuH_cvtDevRealTo16UC1 returned error code %d after launching cvtAndScale32Fto16U_Kernel!\n", cudaStatus); return 1; } if (hostDst) { cudaStatus = hipMemcpy(hostDst, dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } return 0; } int CuH_cpyHostRealToDevComplex(FFT_Real *srcHost, FFT_Complex *dstDev, int cols, int rows) { if (!dstDev) return 1; hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || 
dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } if (srcHost) { cudaStatus = hipMemcpy(dev_temp_4M1, srcHost, rows*cols*sizeof(FFT_Real), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } else { cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(FFT_Real), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } hipLaunchKernelGGL(( cpyRealToComplex_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (FFT_Real*)dev_temp_4M1, dstDev); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "cpyRealToComplex_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "CuH_cpyHostRealToDevComplex returned error code %d after launching cpyRealToComplex_Kernel!\n", cudaStatus); return 1; } //cudaStatus = hipMemcpy(dev_temp_4M2, dstDev, rows*cols*sizeof(FFT_Complex), hipMemcpyDeviceToDevice); //if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // return 1; //} return 0; } int CuH_cpy16UC1ToDevComplex(unsigned short *srcHost, FFT_Complex *dstDev, int cols, int rows) { if (!dstDev) return 1; hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } if (srcHost) { cudaStatus = hipMemcpy(dev_temp_4M1, srcHost, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } else { cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } hipLaunchKernelGGL(( cpy16UC1ToComplex_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (unsigned short*)dev_temp_4M1, dstDev); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "cpy16UC1ToComplex_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "CuH_cpyHostRealToDevComplex returned error code %d after launching cpy16UC1ToComplex_Kernel!\n", cudaStatus); return 1; } //cudaStatus = hipMemcpy(dev_temp_4M2, dstDev, rows*cols*sizeof(FFT_Complex), hipMemcpyDeviceToDevice); //if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // return 1; //} return 0; } int CuH_ROIdevComplex(FFT_Complex *dataDev, int cols, int rows, int x, int y, int width, int height) { 
hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_x, &x, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_y, &y, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &width, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &height, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_divc, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(width / 16, height / 16); if (width % 16) { gridS.x += 1; } if (height % 16) { gridS.y += 1; } hipLaunchKernelGGL(( ROI_Complex_Kernel), dim3(gridS), dim3(blockS) , 0, 0, dataDev, (FFT_Complex *)dev_temp_4M2); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "ROI_Complex_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching ROI_Complex_Kernel!\n", cudaStatus); return 1; } cudaStatus = hipMemcpy(dataDev, dev_temp_4M2, width*height*sizeof(FFT_Complex), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } return 0; } int transpose16UC1(int rows, int cols, void* dev_src, void *dev_dst) { int res = 0; hipError_t cudaStatus = hipSuccess; //copy constant cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } //invoke kernel hipLaunchKernelGGL(( transpose16UC1_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (unsigned short *)dev_src, (unsigned short *)dev_dst); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "transpose16UC1 Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching transpose16UC1_Kernel!\n", cudaStatus); return 1; } return res; } int CuH_transposeComplex(int rows, int cols, FFT_Complex* dev_src, FFT_Complex *dev_dst) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } //invoke kernel hipLaunchKernelGGL(( transposeComplex_Kernel) , 
dim3(gridS), dim3(blockS) , 0, 0, dev_src, dev_dst); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "transposeComplex_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching transposeComplex_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_transpose32FC1(int rows, int cols, void* dev_src, void *dev_dst) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } float *datain = nullptr; if (dev_src) { datain = (float *)dev_src; } else { datain = (float *)dev_temp_4M1; cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(float), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } float *dataout = nullptr; if (dev_dst) { dataout = (float *)dev_dst; } else { dataout = (float *)dev_temp_4M2; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } //invoke kernel hipLaunchKernelGGL(( transpose32FC1_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, datain, dataout); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "transpose32FC1_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching transpose32FC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_transpose16UC1(int rows, int cols, void* host_src, void *host_dst) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } if (host_src) { cudaStatus = hipMemcpy(dev_temp_4M1, host_src, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } else { cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } if (transpose16UC1(rows, cols, (void*)dev_temp_4M1, (void*)dev_temp_4M2)) return 1; if (host_dst) { cudaStatus = hipMemcpy(host_dst, dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } return 0; } int CuH_devCdataCalcWinAndDispersion(int cols, int rows, FFT_Complex *dataDev, FFT_Real *winDev, FFT_Complex *dispersionDev) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 
16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } hipLaunchKernelGGL(( calcWinAndDispersion) , dim3(gridS), dim3(blockS) , 0, 0, dataDev, winDev, dispersionDev); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "calcWinAndDispersion Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "CuH_devCdataCalcWinAndDispersion returned error code %d after launching calcWinAndDispersion!\n", cudaStatus); return 1; } return 0; } int CuH_power8UC1(int rows, int cols, float p) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_f1, &p, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols, hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } hipLaunchKernelGGL(( power8UC1_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (unsigned char*)dev_temp_4M1, (unsigned char*)dev_temp_4M2); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "power8UC1 Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching power8UC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_pixWindow16UC1To8UC1(int rows, int cols, int winCenter, int winWidth, unsigned short *host_src) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_x, &winCenter, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_y, &winWidth, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } if (!host_src) { cudaStatus = hipMemcpy((void*)dev_temp_4M1, (void*)dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } else { cudaStatus = hipMemcpy((void*)dev_temp_4M1, (void*)host_src, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } hipLaunchKernelGGL(( pixWindow16UC1To8UC1_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (unsigned 
short*)dev_temp_4M1, (unsigned char*)dev_temp_4M2); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "pixWindow16UC1To8UC1_Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching pixWindow16UC1To8UC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_allPixAvgValue(int rows, int cols, unsigned short* host_src, float *host_res) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0 || dev_temp_4M3 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } if (!host_src) { cudaStatus = hipMemcpy((void*)dev_temp_4M1, (void*)dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } else { cudaStatus = hipMemcpy((void*)dev_temp_4M1, (void*)host_src, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } //calc block size int dimx = 256; int dimy = cols / dimx; if (cols % dimx) { dimy += 1; } hipLaunchKernelGGL(( allPixAvg_Kernel) , dim3(dimy), dim3(dimx) , 0, 0, (unsigned short*)dev_temp_4M1, (float *)dev_temp_4M3); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "allPixAvg_Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching allPixAvg_Kernel!\n", cudaStatus); return 1; } float *avgArr = (float*)malloc(rows*cols*sizeof(float)); if (!avgArr) { fprintf(stderr, "malloc() failed!"); return 1; } cudaStatus = hipMemcpy((void*)avgArr, (void*)dev_temp_4M3, cols*sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); if (avgArr) { free(avgArr); avgArr = nullptr; } return 1; } for (int i = 1; i < cols; i++) { avgArr[0] += avgArr[i]; avgArr[0] /= 2.0f; } host_res[0] = avgArr[0]; if (avgArr) { free(avgArr); avgArr = nullptr; } return 0; } int CuH_threshold16UC1(int rows, int cols, int thres, int mode, unsigned short* host_src) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_x, &thres, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_divc, &mode, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } if (host_src) { cudaStatus = hipMemcpy((void*)dev_temp_4M1, (void*)host_src, rows*cols*sizeof(unsigned short), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); 
return 1; } } else { cudaStatus = hipMemcpy((void*)dev_temp_4M1, (void*)dev_temp_4M2, rows*cols*sizeof(unsigned short), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); return 1; } } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } hipLaunchKernelGGL(( threshold16UC1_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (unsigned short*)dev_temp_4M1, (unsigned short*)dev_temp_4M2); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "threshold16UC1_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching threshold16UC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_zeroLeftComplexAndDivConst(int rows, int cols, float divConst, FFT_Complex *dataDev) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_f1, &divConst, sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } hipLaunchKernelGGL(( zeroLeftComplexAndDivConst_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, dataDev); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "zeroLeftComplexAndDivConst_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching zeroLeftComplexAndDivConst_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_zeroComplexReOrIm(int rows, int cols, int mode, FFT_Complex *dataDev) { hipError_t cudaStatus = hipSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = hipMemcpyToSymbol(devC_divc, &mode, sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } hipLaunchKernelGGL(( zeroComplexReOrIm_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, dataDev); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "zeroComplexReOrIm_Kernel Kernel failed: %s\n", hipGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching zeroComplexReOrIm_Kernel!\n", cudaStatus); return 1; } return 0; } int 
CuH_flipH8UC1(int rows, int cols) {
    hipError_t cudaStatus = hipSuccess;
    if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) {
        printf("cuda mem alloc failed.\n");
        return 1;
    }
    cudaStatus = hipMemcpyToSymbol(devC_rows, &rows, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "set const var failed!");
        return 1;
    }
    cudaStatus = hipMemcpyToSymbol(devC_cols, &cols, sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "set const var failed!");
        return 1;
    }
    cudaStatus = hipMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols, hipMemcpyDeviceToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        return 1;
    }
    //calc block size
    dim3 blockS(16, 16);
    dim3 gridS(cols / 16, rows / 16);
    if (cols % 16) { gridS.x += 1; }
    if (rows % 16) { gridS.y += 1; }
    hipLaunchKernelGGL(( flipH8UC1_Kernel) , dim3(gridS), dim3(blockS) , 0, 0, (unsigned char*)dev_temp_4M1, (unsigned char*)dev_temp_4M2);
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "flipH8UC1 Kernel failed: %s\n", hipGetErrorString(cudaStatus));
        return 1;
    }
    //wait kernel finish
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching flipH8UC1_Kernel!\n", cudaStatus);
        return 1;
    }
    return 0;
}
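For orientation, the sketch below shows how a caller might chain these helpers for a batched 1-D real-to-complex FFT: allocate device buffers, upload, plan, execute, synchronize, download, and release. It is a minimal, hypothetical driver, not part of CudaHelpLib; it assumes the declarations in CudaHelpLib.h match the definitions above (the allocation/copy/plan wrappers return 1 on success and 0 on failure), and the buffer sizes are illustrative only.

// Hypothetical usage sketch -- not part of CudaHelpLib itself.
// Assumes CudaHelpLib.h declares FFT_Real, FFT_Complex, FFTPlan_Handle
// and the wrapper functions defined above.
#include <cstdio>
#include <vector>
#include "CudaHelpLib.h"

int main() {
    const int cols = 1024;               // samples per line
    const int rows = 4;                  // number of lines in the batch
    const int outCols = cols / 2 + 1;    // R2C keeps the non-redundant half-spectrum

    std::vector<FFT_Real> hostIn(static_cast<size_t>(cols) * rows, 1.0f);
    std::vector<FFT_Complex> hostOut(static_cast<size_t>(outCols) * rows);

    FFT_Real *devIn = nullptr;
    FFT_Complex *devOut = nullptr;
    FFTPlan_Handle plan = 0;

    // All wrappers used here return 1 on success, 0 on failure.
    if (!allocateFFTRealExt(&devIn, cols, rows)) return 1;
    if (!allocateFFTComplexExt(&devOut, outCols, rows)) return 1;
    if (!cudaMemFromHost(devIn, hostIn.data(), hostIn.size() * sizeof(FFT_Real))) return 1;

    if (!createFFTPlan1d_R2C(&plan, cols, rows)) return 1;   // batch of 'rows' transforms
    if (!execR2CfftPlan(plan, devIn, devOut)) return 1;
    if (!cudaDevSync()) return 1;                            // wait for the batch to finish

    if (!cudaMemToHost(hostOut.data(), devOut, hostOut.size() * sizeof(FFT_Complex))) return 1;
    std::printf("DC bin of line 0: re=%f im=%f\n", hostOut[0].re, hostOut[0].im);

    destroyFFTPlan(plan);
    freeCudaMem(devOut);
    freeCudaMem(devIn);
    return 0;
}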
43dd0e4b7c1fa1c9bb7f4a61044a1feb6ac80ee1.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cufft.h" #include <stdio.h> #include "CudaHelpLib.h" static int isCudaOk = 0; static _CUDA_DEV_INFO deviceinfo; ///dev constant define __constant__ int devC_cols; __constant__ int devC_rows; __constant__ int devC_divc; __constant__ int devC_x; __constant__ int devC_y; __constant__ float devC_f1; __constant__ float devC_xe; __constant__ float devC_ye; __constant__ float devC_ze; __constant__ unsigned int devC_Palette[512]; //float guassianTable[512]; //////////////////// static int *dev_temp_4M1 = 0; static int *dev_temp_4M2 = 0; static int *dev_temp_4M3 = 0; static unsigned char *dev_background_4M = 0; static unsigned char *dev_cuboid = 0; //////////////////////////////// //>>>>>>>>>>>>>>>>share lib main func #ifndef Q_OS_LINUX #include "Windows.h" BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved ) { switch (ul_reason_for_call) { case DLL_PROCESS_ATTACH: break; case DLL_THREAD_ATTACH: break; case DLL_THREAD_DETACH: break; case DLL_PROCESS_DETACH: CuH_FreeTempCudaMem(); break; } return TRUE; } #endif //<<<<<<<<<<<<< ////////////////////////////////////////////////////////////////////////// // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>cuda kernel function ////////////////// ////////////////////////////////////////////////////////////////////// __global__ void magnitude32F_Kernel(FFT_Complex * datain, FFT_Real * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; float d = datain[index].re*datain[index].re + datain[index].im*datain[index].im; d = sqrtf(d); dataout[index] = d; } } __global__ void logAddBeta32F_Kernel(FFT_Real * datain, FFT_Real * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; float beta = *((float*)(&devC_y)); float d = datain[index]; d = d + beta; d = logf(d); dataout[index] = d; } } __global__ void cvtAndScale32Fto16U_Kernel(FFT_Real * datain, unsigned short * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; float d = *((float*)(&devC_x)); float beta = *((float*)(&devC_y)); d = d*datain[index] + beta; d = (d >= 0 && d <= 65535.0f)*d + (d > 65535.0f)*65535.0f; dataout[index] = d; } } __global__ void cpyRealToComplex_Kernel(FFT_Real * datain, FFT_Complex * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; dataout[index].re = datain[index]; dataout[index].im = 0; } } __global__ void cpy16UC1ToComplex_Kernel(unsigned short * datain, FFT_Complex * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; dataout[index].re = datain[index]; dataout[index].im = 0; } } __global__ void ROI_Complex_Kernel(FFT_Complex * datain, FFT_Complex *dataout) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { dataout[j*devC_cols + i].re = datain[(j + devC_y)*devC_divc + i + 
devC_x].re; dataout[j*devC_cols + i].im = datain[(j + devC_y)*devC_divc + i + devC_x].im; } } __global__ void transposeComplex_Kernel(FFT_Complex * datain, FFT_Complex *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; //unsigned char data; if (i<devC_cols && j<devC_rows) { dataout[i*devC_rows + j].re = datain[j*devC_cols + i].re; dataout[i*devC_rows + j].im = datain[j*devC_cols + i].im; } } __global__ void transpose32FC1_Kernel(float * datain, float *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; //unsigned char data; if (i<devC_cols && j<devC_rows) { dataout[i*devC_rows + j] = datain[j*devC_cols + i]; } } __global__ void transpose16UC1_Kernel(unsigned short * datain, unsigned short *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; //unsigned char data; if (i<devC_cols && j<devC_rows) { dataout[i*devC_rows + j] = datain[j*devC_cols + i]; } } __global__ void calcWinAndDispersion(FFT_Complex *data, FFT_Real *wind, FFT_Complex *dispersion) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { const int index = j*devC_cols + i; FFT_Real d1 = data[index].re*dispersion[i].re - data[index].im*dispersion[i].im; FFT_Real d2 = data[index].im*dispersion[i].re + data[index].re*dispersion[i].im; d1 *= wind[i]; d2 *= wind[i]; data[index].re = d1; data[index].im = d2; } } __global__ void power8UC1_Kernel(unsigned char * datain, unsigned char * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { float d = datain[j*devC_cols + i]; d = powf(d, devC_f1); if (d>255.0f) d = 255.0f; dataout[j*devC_cols + i] = d; } } __global__ void pixWindow16UC1To8UC1_Kernel(unsigned short * datain, unsigned char * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; float s_v = devC_y; float delta = devC_y; s_v /= 2.0f; s_v = devC_x - s_v; delta = 256.0f / delta; if (i<devC_cols && j<devC_rows) { float d = datain[j*devC_cols + i]; d = d - s_v; d = d*delta; d = d*(d>0.0f); d = d*(d <= 255.0f); //+255.0*(d>255.0f) dataout[j*devC_cols + i] = d; } } __global__ void allPixAvg_Kernel(unsigned short *datain , float *dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int index = j * 256 + i; float sum = 0; if (index < devC_cols) { for (int k = 0; k < devC_rows; k++) { sum = sum + datain[k*devC_cols + index]; if (k > 0) sum /= 2.0f; } dataout[index] = sum; } } __global__ void threshold16UC1_Kernel(unsigned short * datain, unsigned short * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { int th = 0; th = (datain[j*devC_cols + i] >= devC_x); if (devC_divc & 0x0080) th = ~th; if ((devC_divc & 0x000F) == 0) { dataout[j*devC_cols + i] = th * 65535; } else if ((devC_divc & 0x000F) == 1) { dataout[j*devC_cols + i] = th*datain[j*devC_cols + i]; } } } __global__ void zeroLeftComplexAndDivConst_Kernel(FFT_Complex * data) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = 
blockIdx.y*blockDim.y + threadIdx.y; if (i < devC_cols && j < devC_rows) { const int index = j*devC_cols + i; data[index].re = (data[index].re / devC_f1)*(i >= (devC_cols/2)); data[index].im = (data[index].im / devC_f1)*(i >= (devC_cols/2)); } } __global__ void zeroComplexReOrIm_Kernel(FFT_Complex * data) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i < devC_cols && j < devC_rows) { const int index = j*devC_cols + i; data[index].re = (data[index].re )*(devC_divc == 0); data[index].im = (data[index].im)*(devC_divc == 1); } } __global__ void flipH8UC1_Kernel(unsigned char * datain, unsigned char * dataout) { const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if (i<devC_cols && j<devC_rows) { dataout[j*devC_cols + i] = datain[(devC_rows - 1 - j)*devC_cols + i]; } } ////////////////////////////////////////////////////////////////////////// // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<cuda kernel function ////////////////// ////////////////////////////////////////////////////////////////////// void CUDACALLMODE setIsCudaAccLibOK(int isok) { isCudaOk = isok; } int CUDACALLMODE getIsCudaAccLibOK(void) { return isCudaOk; } _CUDA_DEV_INFO* CUDACALLMODE getCudaDeviceInfo(int id) { cudaFree(0); if (id >= 0 || id <= 4) { cudaError_t res; cudaDeviceProp device_prop; res = cudaSetDevice(id); if (res != cudaSuccess) { fprintf(stderr, "invaild cuda id!"); return &deviceinfo; } device_prop.name[0] = 0; cudaGetDeviceProperties(&device_prop, id); sprintf(deviceinfo.name, "%s", device_prop.name); deviceinfo.major = device_prop.major; deviceinfo.minor = device_prop.minor; deviceinfo.multiProcessorCount = device_prop.multiProcessorCount; deviceinfo.deviceOverlap = device_prop.deviceOverlap; } else { deviceinfo.name[0] = 0; } return &deviceinfo; } int getCudaDeviceCount(void) { int device_count; cudaGetDeviceCount(&device_count); return device_count; } int setCudaDevTo(int id) { cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaSetDevice(id); if (cudaStatus == cudaSuccess) { return 1; } else { return 0; } } void CUDACALLMODE cudaNop(void) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0) { cudaStatus = cudaMalloc((void**)&dev_temp_4M1, 1024 * 1024 * 32); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } } if (dev_temp_4M2 == 0) { cudaStatus = cudaMalloc((void**)&dev_temp_4M2, 1024 * 1024 * 64); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } } if (dev_temp_4M3 == 0) { cudaStatus = cudaMalloc((void**)&dev_temp_4M3, 1024 * 1024 * 32); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } } if (dev_background_4M == 0) { cudaStatus = cudaMalloc((void**)&dev_background_4M, 1024 * 1024 * 32); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } } } void CuH_FreeTempCudaMem(void) { if (dev_temp_4M1 != 0) { cudaFree(dev_temp_4M1); dev_temp_4M1 = 0; } if (dev_temp_4M2 != 0) { cudaFree(dev_temp_4M2); dev_temp_4M2 = 0; } if (dev_temp_4M3 != 0) { cudaFree(dev_temp_4M2); dev_temp_4M3 = 0; } if (dev_background_4M != 0) { cudaFree(dev_background_4M); dev_background_4M = 0; } if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error!\n"); } if (cudaDeviceReset() != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!\n"); } } int cudaDevSync(void) { cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaDeviceSynchronize(); if 
(cudaStatus == cudaSuccess) { return 1; } else { return 0; } } int allocateFFTComplex(FFT_Complex** ptr, size_t size) { if (size < 1) { *ptr = nullptr; return 0; } cudaError_t cudaStatus = cudaSuccess; FFT_Complex *res = nullptr; cudaStatus = cudaMalloc((void **)&res, size); if (cudaStatus == cudaSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int allocateFFTComplexExt(FFT_Complex** ptr, int cols, int rows) { if (cols < 0) { *ptr = nullptr; return 0; } if (rows < 0) rows = 1; size_t size = static_cast<size_t>(cols)*static_cast<size_t>(rows)*sizeof(FFT_Complex); cudaError_t cudaStatus = cudaSuccess; FFT_Complex *res = nullptr; cudaStatus = cudaMalloc((void **)&res, size); if (cudaStatus == cudaSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int allocateFFTReal(FFT_Real** ptr, size_t size) { if (size < 1) { *ptr = nullptr; return 0; } cudaError_t cudaStatus = cudaSuccess; FFT_Real *res = nullptr; cudaStatus = cudaMalloc((void **)&res, size); if (cudaStatus == cudaSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int allocateFFTRealExt(FFT_Real** ptr, int cols, int rows) { if (cols < 0) { *ptr = nullptr; return 0; } if (rows < 0) rows = 1; size_t size = static_cast<size_t>(cols)*static_cast<size_t>(rows)*sizeof(FFT_Real); cudaError_t cudaStatus = cudaSuccess; FFT_Real *res = nullptr; cudaStatus = cudaMalloc((void **)&res, size); if (cudaStatus == cudaSuccess && res) { *ptr = res; return 1; } else { *ptr = nullptr; return 0; } } int freeCudaMem(void *ptr) { cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaFree(ptr); if (cudaStatus == cudaSuccess) { return 1; } else { return 0; } } int cudaMemFromHost(void *dstDev, void *srcHost, size_t byteSize) { if (byteSize < 1) { return 0; } cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaMemcpy(dstDev, srcHost, byteSize, cudaMemcpyHostToDevice); if (cudaStatus == cudaSuccess) { return 1; } else { return 0; } } int cudaMemToHost(void *dstHost, void *srcDev, size_t byteSize) { if (byteSize < 1) { return 0; } cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaMemcpy(dstHost, srcDev, byteSize, cudaMemcpyDeviceToHost); if (cudaStatus == cudaSuccess) { return 1; } else { return 0; } } int cudaMemDevToDev(void *dstDev, void *srcDev, size_t byteSize) { if (byteSize < 1) { return 0; } cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaMemcpy(dstDev, srcDev, byteSize, cudaMemcpyDeviceToDevice); if (cudaStatus == cudaSuccess) { return 1; } else { return 0; } } int destroyFFTPlan(FFTPlan_Handle plan) { cufftResult cudaStatus = CUFFT_SUCCESS; cudaStatus = cufftDestroy(plan); if (cudaStatus == CUFFT_SUCCESS) { return 1; } else { return 0; } } int createFFTPlan1d_R2C(FFTPlan_Handle *plan, int cols, int rows) { if (cols < 0) { *plan = 0; return 0; } if (rows < 0) rows = 1; cufftResult cudaStatus = CUFFT_SUCCESS; cudaStatus = cufftPlan1d(plan, cols, CUFFT_R2C, rows); if (cudaStatus == CUFFT_SUCCESS) { return 1; } else { return 0; } } int createFFTPlan1d_C2C(FFTPlan_Handle *plan, int cols, int rows) { if (cols < 0) { *plan = 0; return 0; } if (rows < 0) rows = 1; cufftResult cudaStatus = CUFFT_SUCCESS; cudaStatus = cufftPlan1d(plan, cols, CUFFT_C2C, rows); if (cudaStatus == CUFFT_SUCCESS) { return 1; } else { return 0; } } int createFFTPlan1d_C2R(FFTPlan_Handle *plan, int cols, int rows) { if (cols < 0) { *plan = 0; return 0; } if (rows < 0) rows = 1; cufftResult cudaStatus = CUFFT_SUCCESS; cudaStatus = cufftPlan1d(plan, cols, CUFFT_C2R, rows); if (cudaStatus == 
CUFFT_SUCCESS) { return 1; } else { return 0; } } int execR2CfftPlan(FFTPlan_Handle plan, FFT_Real *idata, FFT_Complex *odata) { cufftResult cudaStatus = CUFFT_SUCCESS; cudaStatus = cufftExecR2C(plan, idata, (cufftComplex*)odata); if (cudaStatus == CUFFT_SUCCESS) { return 1; } else { return 0; } } int execC2CfftPlan(FFTPlan_Handle plan, FFT_Complex *idata, FFT_Complex *odata ,int dir) { cufftResult cudaStatus = CUFFT_SUCCESS; int d = CUFFT_FORWARD; if (dir) { d = CUFFT_INVERSE; } cudaStatus = cufftExecC2C(plan, (cufftComplex*)idata, (cufftComplex*)odata, d); if (cudaStatus == CUFFT_SUCCESS) { return 1; } else { return 0; } } int execC2RfftPlan(FFTPlan_Handle plan, FFT_Complex *idata, FFT_Real *odata) { cufftResult cudaStatus = CUFFT_SUCCESS; cudaStatus = cufftExecC2R(plan,(cufftComplex *) idata, (cufftReal*)odata); if (cudaStatus == CUFFT_SUCCESS) { return 1; } else { return 0; } } int CuH_downloadTemp4M2(int size, unsigned char* host_dst) { cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaMemcpy((void*)host_dst, dev_temp_4M2, size, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } return 0; } int CuH_uploadTemp4M2(int size, unsigned char* host_dst) { cudaError_t cudaStatus = cudaSuccess; cudaStatus = cudaMemcpy(dev_temp_4M2, (void*)host_dst, size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } return 0; } int CuH_magnitudeDevC2R(FFT_Complex *devSrc, int cols, int rows, FFT_Real *hostDst) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } FFT_Complex *srcPtr = 0; if (devSrc) { srcPtr = devSrc; } else { srcPtr = (FFT_Complex*)dev_temp_4M2; } magnitude32F_Kernel <<<gridS, blockS >>>(srcPtr, (FFT_Real*)dev_temp_4M1); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "magnitude32F_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CuH_magnitudeDevC2R returned error code %d after launching magnitude32F_Kernel!\n", cudaStatus); return 1; } if (hostDst) { cudaStatus = cudaMemcpy(hostDst, dev_temp_4M1, rows*cols*sizeof(FFT_Real), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy(dev_temp_4M2, dev_temp_4M1, rows*cols*sizeof(FFT_Real), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } return 0; } int CuH_logDevR2R(FFT_Real *devSrc, int cols, int rows, float beta, FFT_Real *hostDst) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != 
cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_y, &beta, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } FFT_Real *srcPtr = 0; if (devSrc) { srcPtr = devSrc; } else { srcPtr = (FFT_Real*)dev_temp_4M1; cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(FFT_Real), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } logAddBeta32F_Kernel <<<gridS, blockS >>>(srcPtr, (FFT_Real*)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "logAddBeta32F_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CuH_logDevR2R returned error code %d after launching logAddBeta32F_Kernel!\n", cudaStatus); return 1; } if (hostDst) { cudaStatus = cudaMemcpy(hostDst, dev_temp_4M2, rows*cols*sizeof(FFT_Real), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } return 0; } int CuH_cvtDevRealTo16UC1(FFT_Real *devSrc, int cols, int rows, float alpha, float beta, unsigned short *hostDst) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_x, &alpha, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_y, &beta, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } FFT_Real *srcPtr = 0; if (devSrc) { srcPtr = devSrc; } else { srcPtr = (FFT_Real*)dev_temp_4M1; cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(FFT_Real), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } cvtAndScale32Fto16U_Kernel <<<gridS, blockS >>>(srcPtr, (unsigned short*)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cvtAndScale32Fto16U_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CuH_cvtDevRealTo16UC1 returned error code %d after launching cvtAndScale32Fto16U_Kernel!\n", cudaStatus); return 1; } if (hostDst) { cudaStatus = cudaMemcpy(hostDst, dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } return 0; } int CuH_cpyHostRealToDevComplex(FFT_Real *srcHost, FFT_Complex *dstDev, int cols, int rows) { if (!dstDev) return 1; cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; 
} cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } if (srcHost) { cudaStatus = cudaMemcpy(dev_temp_4M1, srcHost, rows*cols*sizeof(FFT_Real), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(FFT_Real), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } cpyRealToComplex_Kernel <<<gridS, blockS >>>((FFT_Real*)dev_temp_4M1, dstDev); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cpyRealToComplex_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CuH_cpyHostRealToDevComplex returned error code %d after launching cpyRealToComplex_Kernel!\n", cudaStatus); return 1; } //cudaStatus = cudaMemcpy(dev_temp_4M2, dstDev, rows*cols*sizeof(FFT_Complex), cudaMemcpyDeviceToDevice); //if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // return 1; //} return 0; } int CuH_cpy16UC1ToDevComplex(unsigned short *srcHost, FFT_Complex *dstDev, int cols, int rows) { if (!dstDev) return 1; cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } if (srcHost) { cudaStatus = cudaMemcpy(dev_temp_4M1, srcHost, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } cpy16UC1ToComplex_Kernel <<<gridS, blockS >>>((unsigned short*)dev_temp_4M1, dstDev); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cpy16UC1ToComplex_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CuH_cpyHostRealToDevComplex returned error code %d after launching cpy16UC1ToComplex_Kernel!\n", cudaStatus); return 1; } //cudaStatus = cudaMemcpy(dev_temp_4M2, dstDev, rows*cols*sizeof(FFT_Complex), cudaMemcpyDeviceToDevice); //if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // return 1; //} return 0; } int CuH_ROIdevComplex(FFT_Complex *dataDev, int cols, int rows, int x, int y, int width, int height) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem 
alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_x, &x, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_y, &y, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &width, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &height, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_divc, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(width / 16, height / 16); if (width % 16) { gridS.x += 1; } if (height % 16) { gridS.y += 1; } ROI_Complex_Kernel<<<gridS, blockS >>>(dataDev, (FFT_Complex *)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ROI_Complex_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching ROI_Complex_Kernel!\n", cudaStatus); return 1; } cudaStatus = cudaMemcpy(dataDev, dev_temp_4M2, width*height*sizeof(FFT_Complex), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } return 0; } int transpose16UC1(int rows, int cols, void* dev_src, void *dev_dst) { int res = 0; cudaError_t cudaStatus = cudaSuccess; //copy constant cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } //invoke kernel transpose16UC1_Kernel <<<gridS, blockS >>>((unsigned short *)dev_src, (unsigned short *)dev_dst); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "transpose16UC1 Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching transpose16UC1_Kernel!\n", cudaStatus); return 1; } return res; } int CuH_transposeComplex(int rows, int cols, FFT_Complex* dev_src, FFT_Complex *dev_dst) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } //invoke kernel transposeComplex_Kernel <<<gridS, blockS >>>(dev_src, dev_dst); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "transposeComplex_Kernel Kernel failed: 
%s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching transposeComplex_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_transpose32FC1(int rows, int cols, void* dev_src, void *dev_dst) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } float *datain = nullptr; if (dev_src) { datain = (float *)dev_src; } else { datain = (float *)dev_temp_4M1; cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(float), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } float *dataout = nullptr; if (dev_dst) { dataout = (float *)dev_dst; } else { dataout = (float *)dev_temp_4M2; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } //invoke kernel transpose32FC1_Kernel <<<gridS, blockS >>>(datain, dataout); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "transpose32FC1_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching transpose32FC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_transpose16UC1(int rows, int cols, void* host_src, void *host_dst) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } if (host_src) { cudaStatus = cudaMemcpy(dev_temp_4M1, host_src, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } if (transpose16UC1(rows, cols, (void*)dev_temp_4M1, (void*)dev_temp_4M2)) return 1; if (host_dst) { cudaStatus = cudaMemcpy(host_dst, dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } return 0; } int CuH_devCdataCalcWinAndDispersion(int cols, int rows, FFT_Complex *dataDev, FFT_Real *winDev, FFT_Complex *dispersionDev) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } calcWinAndDispersion <<<gridS, blockS >>>(dataDev, winDev, dispersionDev); cudaStatus = 
cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "calcWinAndDispersion Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "CuH_devCdataCalcWinAndDispersion returned error code %d after launching calcWinAndDispersion!\n", cudaStatus); return 1; } return 0; } int CuH_power8UC1(int rows, int cols, float p) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_f1, &p, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols, cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } power8UC1_Kernel <<<gridS, blockS >>>((unsigned char*)dev_temp_4M1, (unsigned char*)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "power8UC1 Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching power8UC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_pixWindow16UC1To8UC1(int rows, int cols, int winCenter, int winWidth, unsigned short *host_src) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_x, &winCenter, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_y, &winWidth, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } if (!host_src) { cudaStatus = cudaMemcpy((void*)dev_temp_4M1, (void*)dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy((void*)dev_temp_4M1, (void*)host_src, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } pixWindow16UC1To8UC1_Kernel <<<gridS, blockS >>>((unsigned short*)dev_temp_4M1, (unsigned char*)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "pixWindow16UC1To8UC1_Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait 
kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching pixWindow16UC1To8UC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_allPixAvgValue(int rows, int cols, unsigned short* host_src, float *host_res) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0 || dev_temp_4M3 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } if (!host_src) { cudaStatus = cudaMemcpy((void*)dev_temp_4M1, (void*)dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy((void*)dev_temp_4M1, (void*)host_src, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } //calc block size int dimx = 256; int dimy = cols / dimx; if (cols % dimx) { dimy += 1; } allPixAvg_Kernel <<<dimy, dimx >>>((unsigned short*)dev_temp_4M1, (float *)dev_temp_4M3); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "allPixAvg_Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching allPixAvg_Kernel!\n", cudaStatus); return 1; } float *avgArr = (float*)malloc(rows*cols*sizeof(float)); if (!avgArr) { fprintf(stderr, "malloc() failed!"); return 1; } cudaStatus = cudaMemcpy((void*)avgArr, (void*)dev_temp_4M3, cols*sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); if (avgArr) { free(avgArr); avgArr = nullptr; } return 1; } for (int i = 1; i < cols; i++) { avgArr[0] += avgArr[i]; avgArr[0] /= 2.0f; } host_res[0] = avgArr[0]; if (avgArr) { free(avgArr); avgArr = nullptr; } return 0; } int CuH_threshold16UC1(int rows, int cols, int thres, int mode, unsigned short* host_src) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_x, &thres, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_divc, &mode, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } if (host_src) { cudaStatus = cudaMemcpy((void*)dev_temp_4M1, (void*)host_src, rows*cols*sizeof(unsigned short), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } } else { cudaStatus = cudaMemcpy((void*)dev_temp_4M1, (void*)dev_temp_4M2, rows*cols*sizeof(unsigned short), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); 
return 1; } } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } threshold16UC1_Kernel <<<gridS, blockS >>>((unsigned short*)dev_temp_4M1, (unsigned short*)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "threshold16UC1_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching threshold16UC1_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_zeroLeftComplexAndDivConst(int rows, int cols, float divConst, FFT_Complex *dataDev) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_f1, &divConst, sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } zeroLeftComplexAndDivConst_Kernel <<<gridS, blockS >>>(dataDev); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "zeroLeftComplexAndDivConst_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching zeroLeftComplexAndDivConst_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_zeroComplexReOrIm(int rows, int cols, int mode, FFT_Complex *dataDev) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_divc, &mode, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } zeroComplexReOrIm_Kernel <<<gridS, blockS >>>(dataDev); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "zeroComplexReOrIm_Kernel Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching zeroComplexReOrIm_Kernel!\n", cudaStatus); return 1; } return 0; } int CuH_flipH8UC1(int rows, int cols) { cudaError_t cudaStatus = cudaSuccess; if (dev_temp_4M1 == 0 || dev_temp_4M2 == 0) { printf("cuda mem alloc faild.\n"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_rows, &rows, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var 
failed!"); return 1; } cudaStatus = cudaMemcpyToSymbol(devC_cols, &cols, sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "set const var failed!"); return 1; } cudaStatus = cudaMemcpy(dev_temp_4M1, dev_temp_4M2, rows*cols, cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); return 1; } //calc block size dim3 blockS(16, 16); dim3 gridS(cols / 16, rows / 16); if (cols % 16) { gridS.x += 1; } if (rows % 16) { gridS.y += 1; } flipH8UC1_Kernel <<<gridS, blockS >>>((unsigned char*)dev_temp_4M1, (unsigned char*)dev_temp_4M2); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "flipH8UC1 Kernel failed: %s\n", cudaGetErrorString(cudaStatus)); return 1; } //wait kernel finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching flipH8UC1_Kernel!\n", cudaStatus); return 1; } return 0; }
4e7d7f9d754c369a20d5e41ab7162a0af0a0158b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <cuda/device.hpp> namespace pcl { namespace device { __device__ unsigned int count = 0; struct CorespSearch { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; struct plus { __forceinline__ __device__ int operator () (const int &lhs, const volatile int& rhs) const { return lhs + rhs; } }; PtrStep<float> vmap_g_curr; PtrStep<float> nmap_g_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; mutable PtrStepSz<short2> coresp; mutable int* gbuf; __device__ __forceinline__ int search () const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= coresp.cols || y >= coresp.rows) return 0; coresp.ptr (y)[x] = make_short2 (-1, -1); float3 ncurr_g; ncurr_g.x = nmap_g_curr.ptr (y)[x]; if (isnan (ncurr_g.x)) return 0; float3 vcurr_g; vcurr_g.x = vmap_g_curr.ptr (y )[x]; vcurr_g.y = vmap_g_curr.ptr (y + coresp.rows)[x]; vcurr_g.z = vmap_g_curr.ptr (y + 2 * coresp.rows)[x]; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= coresp.cols || ukr.y >= coresp.rows) return 0; float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return 0; float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float dist = norm (vcurr_g - vprev_g); if (dist > distThres) return 0; ncurr_g.y = nmap_g_curr.ptr (y + coresp.rows)[x]; ncurr_g.z = nmap_g_curr.ptr (y + 2 * 
coresp.rows)[x]; nprev_g.y = nmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; nprev_g.z = nmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); /*if (sine >= 1 || asinf(sine) >= angleThres) return 0;*/ if (/*sine >= 1 || */ sine >= angleThres) return 0; coresp.ptr (y)[x] = make_short2 (ukr.x, ukr.y); return 1; } __device__ __forceinline__ void reduce (int i) const { __shared__ volatile int smem[CTA_SIZE]; int tid = Block::flattenedThreadId (); smem[tid] = i; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); __shared__ bool isLastBlockDone; if (tid == 0) { gbuf[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; __threadfence (); unsigned int value = atomicInc (&count, gridDim.x * gridDim.y); isLastBlockDone = (value == (gridDim.x * gridDim.y - 1)); } __syncthreads (); if (isLastBlockDone) { int sum = 0; int stride = Block::stride (); for (int pos = tid; pos < gridDim.x * gridDim.y; pos += stride) sum += gbuf[pos]; smem[tid] = sum; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) { gbuf[0] = smem[0]; count = 0; } } } __device__ __forceinline__ void operator () () const { int mask = search (); //reduce(mask); if uncomment -> need to allocate and set gbuf } }; __global__ void corespKernel (const CorespSearch cs) { cs (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::findCoresp (const MapArr& vmap_g_curr, const MapArr& nmap_g_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, PtrStepSz<short2> coresp) { CorespSearch cs; cs.vmap_g_curr = vmap_g_curr; cs.nmap_g_curr = nmap_g_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.coresp = coresp; dim3 block (CorespSearch::CTA_SIZE_X, CorespSearch::CTA_SIZE_Y); dim3 grid (divUp (coresp.cols, block.x), divUp (coresp.rows, block.y)); hipLaunchKernelGGL(( corespKernel), dim3(grid), dim3(block), 0, 0, cs); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); }
4e7d7f9d754c369a20d5e41ab7162a0af0a0158b.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <cuda/device.hpp> namespace pcl { namespace device { __device__ unsigned int count = 0; struct CorespSearch { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y }; struct plus { __forceinline__ __device__ int operator () (const int &lhs, const volatile int& rhs) const { return lhs + rhs; } }; PtrStep<float> vmap_g_curr; PtrStep<float> nmap_g_curr; Mat33 Rprev_inv; float3 tprev; Intr intr; PtrStep<float> vmap_g_prev; PtrStep<float> nmap_g_prev; float distThres; float angleThres; mutable PtrStepSz<short2> coresp; mutable int* gbuf; __device__ __forceinline__ int search () const { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= coresp.cols || y >= coresp.rows) return 0; coresp.ptr (y)[x] = make_short2 (-1, -1); float3 ncurr_g; ncurr_g.x = nmap_g_curr.ptr (y)[x]; if (isnan (ncurr_g.x)) return 0; float3 vcurr_g; vcurr_g.x = vmap_g_curr.ptr (y )[x]; vcurr_g.y = vmap_g_curr.ptr (y + coresp.rows)[x]; vcurr_g.z = vmap_g_curr.ptr (y + 2 * coresp.rows)[x]; float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space int2 ukr; //projection ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4 ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4 if (ukr.x < 0 || ukr.y < 0 || ukr.x >= coresp.cols || ukr.y >= coresp.rows) return 0; float3 nprev_g; nprev_g.x = nmap_g_prev.ptr (ukr.y)[ukr.x]; if (isnan (nprev_g.x)) return 0; float3 vprev_g; vprev_g.x = vmap_g_prev.ptr (ukr.y )[ukr.x]; vprev_g.y = vmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; vprev_g.z = vmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float dist = norm (vcurr_g - vprev_g); if (dist > distThres) return 0; ncurr_g.y = nmap_g_curr.ptr (y + coresp.rows)[x]; ncurr_g.z = nmap_g_curr.ptr (y + 2 * coresp.rows)[x]; nprev_g.y = nmap_g_prev.ptr (ukr.y + coresp.rows)[ukr.x]; nprev_g.z = 
nmap_g_prev.ptr (ukr.y + 2 * coresp.rows)[ukr.x]; float sine = norm (cross (ncurr_g, nprev_g)); /*if (sine >= 1 || asinf(sine) >= angleThres) return 0;*/ if (/*sine >= 1 || */ sine >= angleThres) return 0; coresp.ptr (y)[x] = make_short2 (ukr.x, ukr.y); return 1; } __device__ __forceinline__ void reduce (int i) const { __shared__ volatile int smem[CTA_SIZE]; int tid = Block::flattenedThreadId (); smem[tid] = i; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); __shared__ bool isLastBlockDone; if (tid == 0) { gbuf[blockIdx.x + gridDim.x * blockIdx.y] = smem[0]; __threadfence (); unsigned int value = atomicInc (&count, gridDim.x * gridDim.y); isLastBlockDone = (value == (gridDim.x * gridDim.y - 1)); } __syncthreads (); if (isLastBlockDone) { int sum = 0; int stride = Block::stride (); for (int pos = tid; pos < gridDim.x * gridDim.y; pos += stride) sum += gbuf[pos]; smem[tid] = sum; __syncthreads (); Block::reduce<CTA_SIZE>(smem, plus ()); if (tid == 0) { gbuf[0] = smem[0]; count = 0; } } } __device__ __forceinline__ void operator () () const { int mask = search (); //reduce(mask); if uncomment -> need to allocate and set gbuf } }; __global__ void corespKernel (const CorespSearch cs) { cs (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::findCoresp (const MapArr& vmap_g_curr, const MapArr& nmap_g_curr, const Mat33& Rprev_inv, const float3& tprev, const Intr& intr, const MapArr& vmap_g_prev, const MapArr& nmap_g_prev, float distThres, float angleThres, PtrStepSz<short2> coresp) { CorespSearch cs; cs.vmap_g_curr = vmap_g_curr; cs.nmap_g_curr = nmap_g_curr; cs.Rprev_inv = Rprev_inv; cs.tprev = tprev; cs.intr = intr; cs.vmap_g_prev = vmap_g_prev; cs.nmap_g_prev = nmap_g_prev; cs.distThres = distThres; cs.angleThres = angleThres; cs.coresp = coresp; dim3 block (CorespSearch::CTA_SIZE_X, CorespSearch::CTA_SIZE_Y); dim3 grid (divUp (coresp.cols, block.x), divUp (coresp.rows, block.y)); corespKernel<<<grid, block>>>(cs); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); }
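CorespSearch::reduce above uses the "last block finishes the job" idiom: each block writes its partial sum to gbuf, publishes it with __threadfence(), and an atomicInc on a global counter tells the final block to fold the partials together. The standalone sketch below shows the same idiom under a few assumptions: a power-of-two block size, a caller-allocated partial[] of gridDim.x floats, and a launch that passes blockDim.x * sizeof(float) of dynamic shared memory. The names sumKernel and blocksDone are illustrative, not part of the PCL code.

__device__ unsigned int blocksDone = 0;

__global__ void sumKernel(const float* in, float* partial, float* out, int n)
{
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + tid;

    smem[tid] = (gid < n) ? in[gid] : 0.f;

    // In-block tree reduction (blockDim.x must be a power of two).
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        __syncthreads();
        if (tid < s) smem[tid] += smem[tid + s];
    }

    __shared__ bool isLastBlock;
    if (tid == 0) {
        partial[blockIdx.x] = smem[0];
        __threadfence();                                   // publish the partial sum to other blocks
        unsigned int done = atomicInc(&blocksDone, gridDim.x);
        isLastBlock = (done == gridDim.x - 1);
    }
    __syncthreads();

    if (isLastBlock) {                                     // only the last block to finish runs this
        float sum = 0.f;
        for (int b = tid; b < gridDim.x; b += blockDim.x) sum += partial[b];
        smem[tid] = sum;
        for (int s = blockDim.x / 2; s > 0; s >>= 1) {
            __syncthreads();
            if (tid < s) smem[tid] += smem[tid + s];
        }
        if (tid == 0) { *out = smem[0]; blocksDone = 0; }  // reset the counter for the next launch
    }
}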
3b1480fbfa2514426483b28ef731cd9ea1713a21.hip
// !!! This is a file automatically generated by hipify!!! #include "AWGController.h" #include <fstream> #define USING_EXTERNAL_TRIGGER 0 // wait for keystroke to switch to next sequence group //#define USING_EXTERNAL_TRIGGER 1 // use external trigger to switch to next sequence group static int BYTES_PER_DATA = 2; static int BIG_NUMBER = pow(2,31)-1; static int SMALL_NUMBER = 1.024*pow(10,7); // AWG Controller constructor AWGController::AWGController(double s_r, output_mode mode, int sw_buf){ //Set card parameters llSWBufSize = sw_buf; sample_rate = MEGA(s_r/(MEGA(1))); //Initialize card and setup card if successful if (bSpcMInitCardByIdx(&stCard, 0)){ setupCard(); changeMode(SEQUENCE); setupSuccess = true; }else{ cout << "Could not connect"; setupSuccess = false; return; } } //Sets up parameters such as buffer information, clockrate, output channels, and mode bool AWGController::setupCard(){ //Print card info printf(pszSpcMPrintCardInfo(&stCard, szBuffer, sizeof(szBuffer))); //The hardware buffer is the length of the memory we write the wave to spcm_dwSetParam_i64(stCard.hDrv, SPC_DATA_OUTBUFSIZE, llHWBufSize); //Sets up the card for writing to memory spcm_dwSetParam_i32(stCard.hDrv, SPC_M2CMD, M2CMD_CARD_WRITESETUP); //Sets clockrate bSpcMSetupClockPLL (&stCard, sample_rate, false); printf ("Sampling rate set to %.1lf MHz\n", (double) stCard.llSetSamplerate / MEGA(1)); //Enables all channels for (int i=0; i < stCard.lMaxChannels; i++) bSpcMSetupAnalogOutputChannel (&stCard, i, 1000, 0, 0); spcm_dwSetParam_i32(stCard.hDrv, SPC_AMP0,2000); spcm_dwSetParam_i32(stCard.hDrv, SPC_AMP1,2000); spcm_dwSetParam_i32(stCard.hDrv, SPC_FILTER0,0); spcm_dwSetParam_i32(stCard.hDrv, SPC_FILTER1,0); spcm_dwSetParam_i32 (stCard.hDrv, SPC_TRIG_ORMASK, SPC_TMASK_NONE); cout << "Printtingg error Status\n"; cout << stCard.bSetError << endl; connected = true; return true; } bool AWGController::changeMode(output_mode mode){ if( mode == SINGLE){ bSpcMSetupModeRepStdLoops (&stCard, 1, KILO_B(64), 0); return spcm_dwSetParam_i32 (stCard.hDrv, SPCM_X0_MODE, SPCM_XMODE_CONTOUTMARK); }else if (mode == SEQUENCE){ bSpcMSetupModeRepSequence(&stCard,2,3); return true; }else{ printf("Mode selection invalid\n"); return false; } } void AWGController::disconnect() { shouldDisconnect = true; vSpcMCloseCard(&stCard); connected = false; } // static int64 g_llOffset = 0; // static int64 g_llXDiv = KILO_B(100); bool AWGController::loadDataBlock(int segSize, signal_type data, vector<Waveform>* waveforms, vector<RearrangementMove>* moves){ // Generate array of pointers to buffer memory int seg; short show; if(data == STATIC){ seg = 0; show = 1; }else if(data == TRANS_EMPTY || data == TRANS){ seg = 1; show = 1; } if(waveforms != NULL){ vector<short> dataVecX = (*waveforms)[0].dataVectorShort; vector<short> dataVecY = (*waveforms)[1].dataVectorShort; for (int64 i = 0; i <segSize; i++){ pvBuffer[i*2] = dataVecX[i%dataVecX.size()]*show; pvBuffer[i*2+1] = dataVecY[i%dataVecY.size()]*show; } } if(segSize%32!=0){ cout << "fail\n"; return false; } // write data to board (main) sample memory spcm_dwSetParam_i32 (stCard.hDrv, SPC_SEQMODE_WRITESEGMENT, seg); spcm_dwSetParam_i32 (stCard.hDrv, SPC_SEQMODE_SEGMENTSIZE, segSize); spcm_dwDefTransfer_i64 (stCard.hDrv, SPCM_BUF_DATA, SPCM_DIR_PCTOCARD, 0, pvBuffer, 0, segSize*stCard.lBytesPerSample*2); spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_DATA_STARTDMA | M2CMD_DATA_WAITDMA); return true; } void AWGController::vWriteStepEntry (ST_SPCM_CARDINFO *pstCard, uint32 dwStepIndex, uint32 
dwStepNextIndex, uint32 dwSegmentIndex, uint32 dwLoops, uint32 dwFlags) { uint32 dwErr = 0; uint64 qwSequenceEntry = 0; // setup register value qwSequenceEntry = (dwFlags & ~SPCSEQ_LOOPMASK) | (dwLoops & SPCSEQ_LOOPMASK); qwSequenceEntry <<= 32; qwSequenceEntry |= ((dwStepNextIndex << 16)& SPCSEQ_NEXTSTEPMASK) | (dwSegmentIndex & SPCSEQ_SEGMENTMASK); if (!dwErr) dwErr = spcm_dwSetParam_i64 (pstCard->hDrv, SPC_SEQMODE_STEPMEM0 + dwStepIndex, qwSequenceEntry); } void AWGController::pushStaticWaveforms(vector<Waveform> waveforms, bool first_push) { int dataSize = waveforms[0].dataVectorShort.size()*2; hipHostMalloc((void **)&pvBuffer,dataSize*BYTES_PER_DATA,hipHostMallocPortable); if (!pvBuffer){ nSpcMErrorMessageStdOut(&stCard, "Memory allocation error\n", false); return; } if (!stCard.bSetError) { spcm_dwSetParam_i32(stCard.hDrv, SPC_CHENABLE,CHANNEL0 | CHANNEL1 ); if(first_push){ loadDataBlock(dataSize,STATIC,&waveforms,NULL); loadDataBlock(dataSize,TRANS_EMPTY,&waveforms,NULL); vWriteStepEntry (&stCard, 0, 0, STATIC, 1, SPCSEQ_ENDLOOPONTRIG); vWriteStepEntry (&stCard, 1, 0, TRANS, 1, SPCSEQ_ENDLOOPONTRIG); }else loadDataBlock(dataSize,TRANS,&waveforms,NULL); vWriteStepEntry (&stCard, 0, 1, STATIC, 1, SPCSEQ_ENDLOOPONTRIG); spcm_dwSetParam_i32 (stCard.hDrv, SPC_SEQMODE_STARTSTEP, 0); // check for error code if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szBuffer)){ vFreeMemPageAligned (pvBuffer, llSWBufSize); nSpcMErrorMessageStdOut (&stCard, szBuffer, false); printf( "Data transfer failed. Freeing memory\n"); } }else cout << "Error detected in card... Abortinig data transfer \n"; vFreeMemPageAligned (pvBuffer, (int32)llSWBufSize); return; } void AWGController::pushRearrangeWaveforms(int num_moves, int move_size){ cout << num_moves << " " << move_size << endl; pvBuffer = pvBufferDynamic; if (!pvBuffer){ nSpcMErrorMessageStdOut(&stCard, "Memory allocation error\n", false); return; } if (!stCard.bSetError){ loadDataBlock(move_size*num_moves,TRANS,NULL,NULL); vWriteStepEntry (&stCard, 0, 1, STATIC, 1, SPCSEQ_ENDLOOPONTRIG); vWriteStepEntry (&stCard, 1, 0, TRANS, 1, SPCSEQ_ENDLOOPONTRIG); // check for error code if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szBuffer)){ vFreeMemPageAligned (pvBuffer, llSWBufSize); nSpcMErrorMessageStdOut (&stCard, szBuffer, false); printf( "Data transfer failed. Freeing memory\n"); } }else cout << "Error detected in card... Abortinig data transfer \n"; } void AWGController::triggerSequence(){ spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_FORCETRIGGER); } bool AWGController::isConnected() { return connected; } bool AWGController::run(int timeout, int channel){ spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_FORCETRIGGER); if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szErrorText) != ERR_OK) { cout << (szErrorText) << endl; // print the error text return false; } spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_FORCETRIGGER); cout << "Running..."; return true; } void AWGController::stop(){ spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_STOP); if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szErrorText) != ERR_OK){ cout << (szErrorText); }else printf("Stopping card... 
"); } void AWGController::errorPrint(bool dwErr, string error){ if (dwErr) cout << endl << error << endl << dwErr << endl; } void AWGController::allocateDynamicWFBuffer(float duration, int x_dim, int y_dim){ size_t bufferSize = 2*(x_dim+y_dim)*sample_rate*duration*2*BYTES_PER_DATA/1000; //this is the theoretical max buffer size //based on our current rearrangement algorithm // pvBufferDynamic = (short*)pvAllocMemPageAligned(bufferSize); hipHostMalloc((void **)&pvBufferDynamic,bufferSize,hipHostMallocPortable); cout << "Allocated Host Buffer Memory: " << bufferSize << endl; if(numDevices == 1){ hipError_t err = hipSuccess; //move the buffer to the GPU hipSetDevice(defaultDevice); err = hipMalloc((void **)&cudaBuffer, bufferSize); if(err != hipSuccess){cout << "Memory Allocation Error (buffer)" << endl;} cout << "Allocated Device Buffer Memory: " << bufferSize << endl; } if(numDevices == 2){//if using 2 devices, make 1 buffer of half the allotted size //on each device, so that they can be calculated in parallel hipError_t err = hipSuccess; //move the buffer to the GPU hipSetDevice(0); err = hipMalloc((void **)&cudaBuffer, bufferSize/2); if(err != hipSuccess){cout << "Memory Allocation Error (buffer)" << endl;} hipSetDevice(1); err = hipMalloc((void **)&cudaBuffer2, bufferSize/2); if(err != hipSuccess){cout << "Memory Allocation Error (buffer)" << endl;} cout << "Allocated Device Buffer Memory: " << bufferSize/2 << " (x2)"<< endl; } } short* AWGController::getCudaBuffer(){ return cudaBuffer; } short* AWGController::getCudaBuffer2(){ return cudaBuffer2; } short* AWGController::getDynamicBuffer(){ return pvBufferDynamic; } void AWGController::cleanCudaBuffer(){ //free buffer from GPU memory if(numDevices == 1){ hipSetDevice(defaultDevice); hipFree(cudaBuffer); } if(numDevices == 2){ hipSetDevice(0); hipFree(cudaBuffer); hipSetDevice(1); hipFree(cudaBuffer2); } }
3b1480fbfa2514426483b28ef731cd9ea1713a21.cu
#include "AWGController.h" #include <fstream> #define USING_EXTERNAL_TRIGGER 0 // wait for keystroke to switch to next sequence group //#define USING_EXTERNAL_TRIGGER 1 // use external trigger to switch to next sequence group static int BYTES_PER_DATA = 2; static int BIG_NUMBER = pow(2,31)-1; static int SMALL_NUMBER = 1.024*pow(10,7); // AWG Controller constructor AWGController::AWGController(double s_r, output_mode mode, int sw_buf){ //Set card parameters llSWBufSize = sw_buf; sample_rate = MEGA(s_r/(MEGA(1))); //Initialize card and setup card if successful if (bSpcMInitCardByIdx(&stCard, 0)){ setupCard(); changeMode(SEQUENCE); setupSuccess = true; }else{ cout << "Could not connect"; setupSuccess = false; return; } } //Sets up parameters such as buffer information, clockrate, output channels, and mode bool AWGController::setupCard(){ //Print card info printf(pszSpcMPrintCardInfo(&stCard, szBuffer, sizeof(szBuffer))); //The hardware buffer is the length of the memory we write the wave to spcm_dwSetParam_i64(stCard.hDrv, SPC_DATA_OUTBUFSIZE, llHWBufSize); //Sets up the card for writing to memory spcm_dwSetParam_i32(stCard.hDrv, SPC_M2CMD, M2CMD_CARD_WRITESETUP); //Sets clockrate bSpcMSetupClockPLL (&stCard, sample_rate, false); printf ("Sampling rate set to %.1lf MHz\n", (double) stCard.llSetSamplerate / MEGA(1)); //Enables all channels for (int i=0; i < stCard.lMaxChannels; i++) bSpcMSetupAnalogOutputChannel (&stCard, i, 1000, 0, 0); spcm_dwSetParam_i32(stCard.hDrv, SPC_AMP0,2000); spcm_dwSetParam_i32(stCard.hDrv, SPC_AMP1,2000); spcm_dwSetParam_i32(stCard.hDrv, SPC_FILTER0,0); spcm_dwSetParam_i32(stCard.hDrv, SPC_FILTER1,0); spcm_dwSetParam_i32 (stCard.hDrv, SPC_TRIG_ORMASK, SPC_TMASK_NONE); cout << "Printtingg error Status\n"; cout << stCard.bSetError << endl; connected = true; return true; } bool AWGController::changeMode(output_mode mode){ if( mode == SINGLE){ bSpcMSetupModeRepStdLoops (&stCard, 1, KILO_B(64), 0); return spcm_dwSetParam_i32 (stCard.hDrv, SPCM_X0_MODE, SPCM_XMODE_CONTOUTMARK); }else if (mode == SEQUENCE){ bSpcMSetupModeRepSequence(&stCard,2,3); return true; }else{ printf("Mode selection invalid\n"); return false; } } void AWGController::disconnect() { shouldDisconnect = true; vSpcMCloseCard(&stCard); connected = false; } // static int64 g_llOffset = 0; // static int64 g_llXDiv = KILO_B(100); bool AWGController::loadDataBlock(int segSize, signal_type data, vector<Waveform>* waveforms, vector<RearrangementMove>* moves){ // Generate array of pointers to buffer memory int seg; short show; if(data == STATIC){ seg = 0; show = 1; }else if(data == TRANS_EMPTY || data == TRANS){ seg = 1; show = 1; } if(waveforms != NULL){ vector<short> dataVecX = (*waveforms)[0].dataVectorShort; vector<short> dataVecY = (*waveforms)[1].dataVectorShort; for (int64 i = 0; i <segSize; i++){ pvBuffer[i*2] = dataVecX[i%dataVecX.size()]*show; pvBuffer[i*2+1] = dataVecY[i%dataVecY.size()]*show; } } if(segSize%32!=0){ cout << "fail\n"; return false; } // write data to board (main) sample memory spcm_dwSetParam_i32 (stCard.hDrv, SPC_SEQMODE_WRITESEGMENT, seg); spcm_dwSetParam_i32 (stCard.hDrv, SPC_SEQMODE_SEGMENTSIZE, segSize); spcm_dwDefTransfer_i64 (stCard.hDrv, SPCM_BUF_DATA, SPCM_DIR_PCTOCARD, 0, pvBuffer, 0, segSize*stCard.lBytesPerSample*2); spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_DATA_STARTDMA | M2CMD_DATA_WAITDMA); return true; } void AWGController::vWriteStepEntry (ST_SPCM_CARDINFO *pstCard, uint32 dwStepIndex, uint32 dwStepNextIndex, uint32 dwSegmentIndex, uint32 dwLoops, uint32 
dwFlags) { uint32 dwErr = 0; uint64 qwSequenceEntry = 0; // setup register value qwSequenceEntry = (dwFlags & ~SPCSEQ_LOOPMASK) | (dwLoops & SPCSEQ_LOOPMASK); qwSequenceEntry <<= 32; qwSequenceEntry |= ((dwStepNextIndex << 16)& SPCSEQ_NEXTSTEPMASK) | (dwSegmentIndex & SPCSEQ_SEGMENTMASK); if (!dwErr) dwErr = spcm_dwSetParam_i64 (pstCard->hDrv, SPC_SEQMODE_STEPMEM0 + dwStepIndex, qwSequenceEntry); } void AWGController::pushStaticWaveforms(vector<Waveform> waveforms, bool first_push) { int dataSize = waveforms[0].dataVectorShort.size()*2; cudaHostAlloc((void **)&pvBuffer,dataSize*BYTES_PER_DATA,cudaHostAllocPortable); if (!pvBuffer){ nSpcMErrorMessageStdOut(&stCard, "Memory allocation error\n", false); return; } if (!stCard.bSetError) { spcm_dwSetParam_i32(stCard.hDrv, SPC_CHENABLE,CHANNEL0 | CHANNEL1 ); if(first_push){ loadDataBlock(dataSize,STATIC,&waveforms,NULL); loadDataBlock(dataSize,TRANS_EMPTY,&waveforms,NULL); vWriteStepEntry (&stCard, 0, 0, STATIC, 1, SPCSEQ_ENDLOOPONTRIG); vWriteStepEntry (&stCard, 1, 0, TRANS, 1, SPCSEQ_ENDLOOPONTRIG); }else loadDataBlock(dataSize,TRANS,&waveforms,NULL); vWriteStepEntry (&stCard, 0, 1, STATIC, 1, SPCSEQ_ENDLOOPONTRIG); spcm_dwSetParam_i32 (stCard.hDrv, SPC_SEQMODE_STARTSTEP, 0); // check for error code if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szBuffer)){ vFreeMemPageAligned (pvBuffer, llSWBufSize); nSpcMErrorMessageStdOut (&stCard, szBuffer, false); printf( "Data transfer failed. Freeing memory\n"); } }else cout << "Error detected in card... Abortinig data transfer \n"; vFreeMemPageAligned (pvBuffer, (int32)llSWBufSize); return; } void AWGController::pushRearrangeWaveforms(int num_moves, int move_size){ cout << num_moves << " " << move_size << endl; pvBuffer = pvBufferDynamic; if (!pvBuffer){ nSpcMErrorMessageStdOut(&stCard, "Memory allocation error\n", false); return; } if (!stCard.bSetError){ loadDataBlock(move_size*num_moves,TRANS,NULL,NULL); vWriteStepEntry (&stCard, 0, 1, STATIC, 1, SPCSEQ_ENDLOOPONTRIG); vWriteStepEntry (&stCard, 1, 0, TRANS, 1, SPCSEQ_ENDLOOPONTRIG); // check for error code if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szBuffer)){ vFreeMemPageAligned (pvBuffer, llSWBufSize); nSpcMErrorMessageStdOut (&stCard, szBuffer, false); printf( "Data transfer failed. Freeing memory\n"); } }else cout << "Error detected in card... Abortinig data transfer \n"; } void AWGController::triggerSequence(){ spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_FORCETRIGGER); } bool AWGController::isConnected() { return connected; } bool AWGController::run(int timeout, int channel){ spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_FORCETRIGGER); if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szErrorText) != ERR_OK) { cout << (szErrorText) << endl; // print the error text return false; } spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_FORCETRIGGER); cout << "Running..."; return true; } void AWGController::stop(){ spcm_dwSetParam_i32 (stCard.hDrv, SPC_M2CMD, M2CMD_CARD_STOP); if (spcm_dwGetErrorInfo_i32 (stCard.hDrv, NULL, NULL, szErrorText) != ERR_OK){ cout << (szErrorText); }else printf("Stopping card... 
"); } void AWGController::errorPrint(bool dwErr, string error){ if (dwErr) cout << endl << error << endl << dwErr << endl; } void AWGController::allocateDynamicWFBuffer(float duration, int x_dim, int y_dim){ size_t bufferSize = 2*(x_dim+y_dim)*sample_rate*duration*2*BYTES_PER_DATA/1000; //this is the theoretical max buffer size //based on our current rearrangement algorithm // pvBufferDynamic = (short*)pvAllocMemPageAligned(bufferSize); cudaHostAlloc((void **)&pvBufferDynamic,bufferSize,cudaHostAllocPortable); cout << "Allocated Host Buffer Memory: " << bufferSize << endl; if(numDevices == 1){ cudaError_t err = cudaSuccess; //move the buffer to the GPU cudaSetDevice(defaultDevice); err = cudaMalloc((void **)&cudaBuffer, bufferSize); if(err != cudaSuccess){cout << "Memory Allocation Error (buffer)" << endl;} cout << "Allocated Device Buffer Memory: " << bufferSize << endl; } if(numDevices == 2){//if using 2 devices, make 1 buffer of half the allotted size //on each device, so that they can be calculated in parallel cudaError_t err = cudaSuccess; //move the buffer to the GPU cudaSetDevice(0); err = cudaMalloc((void **)&cudaBuffer, bufferSize/2); if(err != cudaSuccess){cout << "Memory Allocation Error (buffer)" << endl;} cudaSetDevice(1); err = cudaMalloc((void **)&cudaBuffer2, bufferSize/2); if(err != cudaSuccess){cout << "Memory Allocation Error (buffer)" << endl;} cout << "Allocated Device Buffer Memory: " << bufferSize/2 << " (x2)"<< endl; } } short* AWGController::getCudaBuffer(){ return cudaBuffer; } short* AWGController::getCudaBuffer2(){ return cudaBuffer2; } short* AWGController::getDynamicBuffer(){ return pvBufferDynamic; } void AWGController::cleanCudaBuffer(){ //free buffer from GPU memory if(numDevices == 1){ cudaSetDevice(defaultDevice); cudaFree(cudaBuffer); } if(numDevices == 2){ cudaSetDevice(0); cudaFree(cudaBuffer); cudaSetDevice(1); cudaFree(cudaBuffer2); } }
4c4368bc117bb4579cda9b9f16d830828015aafc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
//#include<cuda.h>
#include<cuda_runtime.h>

#define N 4
#define BLOCK_DIM 4

__global__ void matrixAdd (int *dev_a);

int main()
{
    int a[N*N]={};
    int i;
    for(i=0;i<16;i++)
    {
        printf("Enter the %dth element= ",i);
        // a[i]=i*2;
        scanf("%d",&a[i]);
    }
    int *dev_a;
    //int dev_b;
    int size = N * N * sizeof(int);
    // initialize a and b with real values (NOT SHOWN)
    hipMalloc((void**)&dev_a, size);
    //hipMalloc((void**)&dev_b, size);
    //hipMalloc((void**)&dev_c, size);
    hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
    //hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((int)ceil(N/dimBlock.x),(int)ceil(N/dimBlock.y));
    hipLaunchKernelGGL(( matrixAdd), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_a);
    hipMemcpy(a, dev_a, size, hipMemcpyDeviceToHost);
    hipFree(dev_a);
    //hipFree(dev_b);
}

__global__ void matrixAdd (int *dev_a)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int index = col + row * N;
    //dev_b=index;
    //}
    if (col < N && row < N)
    {
        //c[index] = a[index] + b[index];
        printf("%d\n",dev_a[index]);
    }
}
4c4368bc117bb4579cda9b9f16d830828015aafc.cu
#include<stdio.h>
//#include<cuda.h>
#include<cuda_runtime.h>

#define N 4
#define BLOCK_DIM 4

__global__ void matrixAdd (int *dev_a);

int main()
{
    int a[N*N]={};
    int i;
    for(i=0;i<16;i++)
    {
        printf("Enter the %dth element= ",i);
        // a[i]=i*2;
        scanf("%d",&a[i]);
    }
    int *dev_a;
    //int dev_b;
    int size = N * N * sizeof(int);
    // initialize a and b with real values (NOT SHOWN)
    cudaMalloc((void**)&dev_a, size);
    //cudaMalloc((void**)&dev_b, size);
    //cudaMalloc((void**)&dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    //cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((int)ceil(N/dimBlock.x),(int)ceil(N/dimBlock.y));
    matrixAdd<<<dimGrid,dimBlock>>>(dev_a);
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    //cudaFree(dev_b);
}

__global__ void matrixAdd (int *dev_a)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int index = col + row * N;
    //dev_b=index;
    //}
    if (col < N && row < N)
    {
        //c[index] = a[index] + b[index];
        printf("%d\n",dev_a[index]);
    }
}
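One detail worth calling out in the launch above: ceil(N/dimBlock.x) only rounds up if the division happens in floating point, and here the integer quotient is truncated before ceil() ever sees it (harmless for N = 4 and BLOCK_DIM = 4, but it silently under-provisions the grid for other sizes). A hedged sketch of the usual integer ceiling-division form, reusing the names from the file above:

// Grid sizing that always covers an N x N problem with BLOCK_DIM x BLOCK_DIM blocks.
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM,
             (N + BLOCK_DIM - 1) / BLOCK_DIM);
matrixAdd<<<dimGrid, dimBlock>>>(dev_a);
cudaDeviceSynchronize();   // also flushes the device-side printf buffer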
0f257430d5dd3b468f406884eb202a2562f74c72.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "util.hpp" // host implementation of dot product double dot_host(const double *x, const double* y, int n) { double sum = 0; for(auto i=0; i<n; ++i) { sum += x[i]*y[i]; } return sum; } // Works for nthreads=WIDTH<=1024 and n<=1024 template <int WIDTH> __global__ void dot_gpu_kernel(const double *x, const double* y, double *result, int n) { __shared__ double buf[WIDTH]; int i = threadIdx.x; buf[i] = i<n? x[i]*y[i]: 0; int width = WIDTH/2; while (width) { __syncthreads(); if (i<width) { buf[i] += buf[i+width]; } width /= 2; } if (!i) { *result = *buf; } } // Works for arbitrary n with a single thread block template <int WIDTH> __global__ void dot_gpu_kernel2(const double *x, const double* y, double *result, int n) { __shared__ double buf[WIDTH]; int i = threadIdx.x; int gid = i; buf[i] = 0; while (gid<n) { buf[i] += x[gid]*y[gid]; gid += WIDTH; } int width = WIDTH/2; while (width) { __syncthreads(); if (i<width) { buf[i] += buf[i+width]; } width /= 2; } if (!i) { *result = *buf; } } template <int WIDTH> __global__ void dot_gpu_kernel_full(const double *x, const double* y, double *result, int n) { __shared__ double buf[WIDTH]; int i = threadIdx.x; int gid = i + blockIdx.x*blockDim.x; buf[i] = gid<n? x[gid]*y[gid]: 0; int width = WIDTH/2; while (width) { __syncthreads(); if (i<width) { buf[i] += buf[i+width]; } width /= 2; } if (!i) { atomicAdd(result, *buf); } } #define WIDTH 128 double dot_gpu(const double *x, const double* y, int n) { static double* result = malloc_managed<double>(1); //dot_gpu_kernel<WIDTH><<<1, WIDTH>>>(x, y, result, n); //dot_gpu_kernel2<WIDTH><<<1, WIDTH>>>(x, y, result, n); hipLaunchKernelGGL(( dot_gpu_kernel_full<WIDTH>), dim3((n-1)/WIDTH+1), dim3(WIDTH), 0, 0, x, y, result, n); hipDeviceSynchronize(); return *result; } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 4); size_t n = (1 << pow); n = pow; auto size_in_bytes = n * sizeof(double); std::cout << "dot product CUDA of length n = " << n << " : " << size_in_bytes*1e-9 << "MB\n"; auto x_h = malloc_host<double>(n, 2.); auto y_h = malloc_host<double>(n); for(auto i=0; i<n; ++i) { y_h[i] = rand()%10; } auto x_d = malloc_device<double>(n); auto y_d = malloc_device<double>(n); // copy initial conditions to device copy_to_device<double>(x_h, x_d, n); copy_to_device<double>(y_h, y_d, n); auto result = dot_gpu(x_d, y_d, n); auto expected = dot_host(x_h, y_h, n); printf("expected %f got %f\n", (float)expected, (float)result); return 0; }
0f257430d5dd3b468f406884eb202a2562f74c72.cu
#include <iostream> #include <cuda.h> #include "util.hpp" // host implementation of dot product double dot_host(const double *x, const double* y, int n) { double sum = 0; for(auto i=0; i<n; ++i) { sum += x[i]*y[i]; } return sum; } // Works for nthreads=WIDTH<=1024 and n<=1024 template <int WIDTH> __global__ void dot_gpu_kernel(const double *x, const double* y, double *result, int n) { __shared__ double buf[WIDTH]; int i = threadIdx.x; buf[i] = i<n? x[i]*y[i]: 0; int width = WIDTH/2; while (width) { __syncthreads(); if (i<width) { buf[i] += buf[i+width]; } width /= 2; } if (!i) { *result = *buf; } } // Works for arbitrary n with a single thread block template <int WIDTH> __global__ void dot_gpu_kernel2(const double *x, const double* y, double *result, int n) { __shared__ double buf[WIDTH]; int i = threadIdx.x; int gid = i; buf[i] = 0; while (gid<n) { buf[i] += x[gid]*y[gid]; gid += WIDTH; } int width = WIDTH/2; while (width) { __syncthreads(); if (i<width) { buf[i] += buf[i+width]; } width /= 2; } if (!i) { *result = *buf; } } template <int WIDTH> __global__ void dot_gpu_kernel_full(const double *x, const double* y, double *result, int n) { __shared__ double buf[WIDTH]; int i = threadIdx.x; int gid = i + blockIdx.x*blockDim.x; buf[i] = gid<n? x[gid]*y[gid]: 0; int width = WIDTH/2; while (width) { __syncthreads(); if (i<width) { buf[i] += buf[i+width]; } width /= 2; } if (!i) { atomicAdd(result, *buf); } } #define WIDTH 128 double dot_gpu(const double *x, const double* y, int n) { static double* result = malloc_managed<double>(1); //dot_gpu_kernel<WIDTH><<<1, WIDTH>>>(x, y, result, n); //dot_gpu_kernel2<WIDTH><<<1, WIDTH>>>(x, y, result, n); dot_gpu_kernel_full<WIDTH><<<(n-1)/WIDTH+1, WIDTH>>>(x, y, result, n); cudaDeviceSynchronize(); return *result; } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 4); size_t n = (1 << pow); n = pow; auto size_in_bytes = n * sizeof(double); std::cout << "dot product CUDA of length n = " << n << " : " << size_in_bytes*1e-9 << "MB\n"; auto x_h = malloc_host<double>(n, 2.); auto y_h = malloc_host<double>(n); for(auto i=0; i<n; ++i) { y_h[i] = rand()%10; } auto x_d = malloc_device<double>(n); auto y_d = malloc_device<double>(n); // copy initial conditions to device copy_to_device<double>(x_h, x_d, n); copy_to_device<double>(y_h, y_d, n); auto result = dot_gpu(x_d, y_d, n); auto expected = dot_host(x_h, y_h, n); printf("expected %f got %f\n", (float)expected, (float)result); return 0; }
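dot_gpu_kernel_full accumulates into *result with atomicAdd, and the pointer it uses is static, so the accumulator has to be reset to zero before each launch or repeated calls will keep adding onto the previous answer. A hedged sketch of such a call site follows; dot_gpu_checked is an illustrative name, and atomicAdd on double requires compute capability 6.0 or newer (e.g. nvcc -arch=sm_60).

double dot_gpu_checked(const double* x, const double* y, int n)
{
    static double* result = malloc_managed<double>(1);  // helper from util.hpp above
    *result = 0.0;                                       // reset the accumulator before every launch
    dot_gpu_kernel_full<WIDTH><<<(n - 1) / WIDTH + 1, WIDTH>>>(x, y, result, n);
    cudaDeviceSynchronize();
    return *result;
}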
741c783585872b15c22bf3b371d63df0be0ec11d.hip
// !!! This is a file automatically generated by hipify!!! //Realizado por Julio Ballesteros #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "kernel.h" #define BLOCK_SIZE 512 float mask[] = { 0.1,0.1,0.1, 0.1,0.1,0.1, 0.1,0.1,0.1 }; extern __shared__ uint8 temp[]; __global__ void filtro_k(uint8* img, float* filtro, uint32 imgW, uint32 imgH, uint32 filW, uint32 filH, uint8* imgResult) { int gi, gj, lindex, gindex, x, y, threadID; uint8 pixOrig; uint8 pixResult; threadID = blockIdx.x*blockDim.x + threadIdx.x; if (threadID >= (imgW*imgH)) return; gi = threadID / imgW; gj = threadID % imgW; gindex = gi * imgW + gj; lindex = threadIdx.x + imgW + 3; temp[lindex] = img[gindex]; if (threadIdx.x < imgW + 3) { if(gindex >= imgW + 3) temp[lindex - (imgW + 3)] = img[gindex - (imgW + 3)]; if(gindex < imgW * imgH - (imgW + 3)) temp[lindex + blockDim.x] = img[gindex + blockDim.x]; } if (gi == 0 || gi == imgH - 1) return; if (gj < 3 || gj >= imgW - 3) return; __syncthreads(); pixOrig = 0; pixResult = 0; for (x = -1; x < 2; x++) for (y = -1; y < 2; y++) { pixOrig = temp[lindex + x*imgW + y*3]; pixResult += pixOrig*filtro[(x + 1) + ((y + 1) * 3)]; } imgResult[gindex] = pixResult; } uint8* applyFilterGPU(uint8* img, float* filtro, uint32 imgW, uint32 imgH, uint32 filW, uint32 filH) { uint8 i, j; uint8* img_d = NULL; float* filtro_d = NULL; uint8* imgResult_d = NULL; uint8* imgResult_h = NULL; uint8* amplifiedImg = NULL; uint8* imgResult = NULL; hipMalloc((void**)&img_d, sizeof(uint8)* 3 * (imgW + 2)*(imgH + 2)); hipMalloc((void**)&imgResult_d, sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3); hipMalloc((void**)&filtro_d, sizeof(float)*filH * filW); imgResult_h = (uint8*)malloc(sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3); amplifiedImg = (uint8*)malloc(3 * (imgW + 2)*(imgH + 2)); imgResult = (uint8*)malloc(sizeof(uint8)*imgW*imgH * 3); memset(amplifiedImg, 0, 3 * (imgW + 2)*(imgH + 2)); for (i = 1; i <= imgH; i++) for (j = 1; j <= imgW; j++) { ((pixel_t*)amplifiedImg)[j + i * (imgW + 2)] = ((pixel_t*)img)[(j - 1) + (i - 1)*imgW]; } hipMemcpy(img_d, amplifiedImg, sizeof(uint8) * 3 * (imgW + 2)*(imgH + 2), hipMemcpyHostToDevice); hipMemcpy(filtro_d, filtro, sizeof(float)*filW * filH, hipMemcpyHostToDevice); hipMemset(imgResult_d, 0xFF, sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3); memset(imgResult_h, 0xFF, sizeof(uint8)* (imgW + 2)*(imgH + 2) * 3); memset(imgResult, 0xFF, sizeof(uint8)* imgW * imgH * 3); int numThreadsBloque = BLOCK_SIZE; int numBloques = ((imgW + 2)*(imgH + 2)*3) / numThreadsBloque + 1; filtro_k << < numBloques, numThreadsBloque, sizeof(uint8) * BLOCK_SIZE + ((imgW + 2 + 1) * 2) * 3 >> > (img_d, filtro_d, (imgW + 2)*3, imgH + 2, filW, filH, imgResult_d); hipDeviceSynchronize(); hipMemcpy(imgResult_h, imgResult_d, sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3, hipMemcpyDeviceToHost); for (i = 0; i < imgH; i++) for (j = 0; j < imgW; j++) { ((pixel_t*)imgResult)[j + i * imgW] = ((pixel_t*)imgResult_h)[(j + 1) + (i + 1)*(imgW + 2)]; } hipFree(img_d); hipFree(filtro_d); hipFree(imgResult_d); free(imgResult_h); return imgResult; } uint8* applyFilterCPU(uint8* img, float* filtro, uint32 imgW, uint32 imgH, uint32 filW, uint32 filH) { uint32 i, j; int x, y; pixel_t pixOrig; pixel_t pixResult; uint8* imgResult = NULL; uint8* amplifiedImg = NULL; imgResult = (uint8*)malloc(sizeof(uint8)*imgW*imgH * 3); memset(imgResult, 0xFF, sizeof(uint8)*imgW*imgH * 3); amplifiedImg = (uint8*)malloc(3 * (imgW + 2)*(imgH + 2)); memset(amplifiedImg, 0, 3 * 
(imgW + 2)*(imgH + 2)); for (i = 1; i <= imgH; i++) for (j = 1; j <= imgW; j++) { ((pixel_t*)amplifiedImg)[j + i * (imgW + 2)] = ((pixel_t*)img)[(j - 1) + (i - 1)*imgW]; } for (i = 0; i < imgH; i++) for (j = 0; j < imgW; j ++) { pixOrig.R = pixOrig.G = pixOrig.B = 0; pixResult.R = pixResult.G = pixResult.B = 0; for (y = -1; y < 2; y++) for (x = -1; x < 2; x++) { pixOrig = ((pixel_t*)amplifiedImg)[(j + x + 1) + ((i + y + 1) * (imgW + 2))]; pixResult.R += pixOrig.R*filtro[(x + 1) + ((y + 1) * 3)]; pixResult.G += pixOrig.G*filtro[(x + 1) + ((y + 1) * 3)]; pixResult.B += pixOrig.B*filtro[(x + 1) + ((y + 1) * 3)]; } ((pixel_t*)imgResult)[j + i * imgW] = pixResult; } return imgResult; }
741c783585872b15c22bf3b371d63df0be0ec11d.cu
//Realizado por Julio Ballesteros #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "kernel.h" #define BLOCK_SIZE 512 float mask[] = { 0.1,0.1,0.1, 0.1,0.1,0.1, 0.1,0.1,0.1 }; extern __shared__ uint8 temp[]; __global__ void filtro_k(uint8* img, float* filtro, uint32 imgW, uint32 imgH, uint32 filW, uint32 filH, uint8* imgResult) { int gi, gj, lindex, gindex, x, y, threadID; uint8 pixOrig; uint8 pixResult; threadID = blockIdx.x*blockDim.x + threadIdx.x; if (threadID >= (imgW*imgH)) return; gi = threadID / imgW; gj = threadID % imgW; gindex = gi * imgW + gj; lindex = threadIdx.x + imgW + 3; temp[lindex] = img[gindex]; if (threadIdx.x < imgW + 3) { if(gindex >= imgW + 3) temp[lindex - (imgW + 3)] = img[gindex - (imgW + 3)]; if(gindex < imgW * imgH - (imgW + 3)) temp[lindex + blockDim.x] = img[gindex + blockDim.x]; } if (gi == 0 || gi == imgH - 1) return; if (gj < 3 || gj >= imgW - 3) return; __syncthreads(); pixOrig = 0; pixResult = 0; for (x = -1; x < 2; x++) for (y = -1; y < 2; y++) { pixOrig = temp[lindex + x*imgW + y*3]; pixResult += pixOrig*filtro[(x + 1) + ((y + 1) * 3)]; } imgResult[gindex] = pixResult; } uint8* applyFilterGPU(uint8* img, float* filtro, uint32 imgW, uint32 imgH, uint32 filW, uint32 filH) { uint8 i, j; uint8* img_d = NULL; float* filtro_d = NULL; uint8* imgResult_d = NULL; uint8* imgResult_h = NULL; uint8* amplifiedImg = NULL; uint8* imgResult = NULL; cudaMalloc((void**)&img_d, sizeof(uint8)* 3 * (imgW + 2)*(imgH + 2)); cudaMalloc((void**)&imgResult_d, sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3); cudaMalloc((void**)&filtro_d, sizeof(float)*filH * filW); imgResult_h = (uint8*)malloc(sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3); amplifiedImg = (uint8*)malloc(3 * (imgW + 2)*(imgH + 2)); imgResult = (uint8*)malloc(sizeof(uint8)*imgW*imgH * 3); memset(amplifiedImg, 0, 3 * (imgW + 2)*(imgH + 2)); for (i = 1; i <= imgH; i++) for (j = 1; j <= imgW; j++) { ((pixel_t*)amplifiedImg)[j + i * (imgW + 2)] = ((pixel_t*)img)[(j - 1) + (i - 1)*imgW]; } cudaMemcpy(img_d, amplifiedImg, sizeof(uint8) * 3 * (imgW + 2)*(imgH + 2), cudaMemcpyHostToDevice); cudaMemcpy(filtro_d, filtro, sizeof(float)*filW * filH, cudaMemcpyHostToDevice); cudaMemset(imgResult_d, 0xFF, sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3); memset(imgResult_h, 0xFF, sizeof(uint8)* (imgW + 2)*(imgH + 2) * 3); memset(imgResult, 0xFF, sizeof(uint8)* imgW * imgH * 3); int numThreadsBloque = BLOCK_SIZE; int numBloques = ((imgW + 2)*(imgH + 2)*3) / numThreadsBloque + 1; filtro_k << < numBloques, numThreadsBloque, sizeof(uint8) * BLOCK_SIZE + ((imgW + 2 + 1) * 2) * 3 >> > (img_d, filtro_d, (imgW + 2)*3, imgH + 2, filW, filH, imgResult_d); cudaDeviceSynchronize(); cudaMemcpy(imgResult_h, imgResult_d, sizeof(uint8)*(imgW + 2)*(imgH + 2) * 3, cudaMemcpyDeviceToHost); for (i = 0; i < imgH; i++) for (j = 0; j < imgW; j++) { ((pixel_t*)imgResult)[j + i * imgW] = ((pixel_t*)imgResult_h)[(j + 1) + (i + 1)*(imgW + 2)]; } cudaFree(img_d); cudaFree(filtro_d); cudaFree(imgResult_d); free(imgResult_h); return imgResult; } uint8* applyFilterCPU(uint8* img, float* filtro, uint32 imgW, uint32 imgH, uint32 filW, uint32 filH) { uint32 i, j; int x, y; pixel_t pixOrig; pixel_t pixResult; uint8* imgResult = NULL; uint8* amplifiedImg = NULL; imgResult = (uint8*)malloc(sizeof(uint8)*imgW*imgH * 3); memset(imgResult, 0xFF, sizeof(uint8)*imgW*imgH * 3); amplifiedImg = (uint8*)malloc(3 * (imgW + 2)*(imgH + 2)); memset(amplifiedImg, 0, 3 * (imgW + 2)*(imgH + 2)); for (i = 1; i <= imgH; 
i++) for (j = 1; j <= imgW; j++) { ((pixel_t*)amplifiedImg)[j + i * (imgW + 2)] = ((pixel_t*)img)[(j - 1) + (i - 1)*imgW]; } for (i = 0; i < imgH; i++) for (j = 0; j < imgW; j ++) { pixOrig.R = pixOrig.G = pixOrig.B = 0; pixResult.R = pixResult.G = pixResult.B = 0; for (y = -1; y < 2; y++) for (x = -1; x < 2; x++) { pixOrig = ((pixel_t*)amplifiedImg)[(j + x + 1) + ((i + y + 1) * (imgW + 2))]; pixResult.R += pixOrig.R*filtro[(x + 1) + ((y + 1) * 3)]; pixResult.G += pixOrig.G*filtro[(x + 1) + ((y + 1) * 3)]; pixResult.B += pixOrig.B*filtro[(x + 1) + ((y + 1) * 3)]; } ((pixel_t*)imgResult)[j + i * imgW] = pixResult; } return imgResult; }
4f144d70afd9b6ba7cd047eaac15463b29fd972a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "iostream" #include "stdlib.h" #include <thread> // std::this_thread::sleep_for #include <chrono> // std::chrono::seconds #include "time.h" #include <ctime> #include "fstream" using namespace std; int getPos(int m, int n, const int width) { return m* width + n; } void printCells(int* cells, int const height, int const width) { for (int i = 0; i < height + 2; i++) { for (int j = 0; j < width + 2; j++) { if (cells[getPos(i, j, width)] == 1) { cout << "O" << " "; } else { cout << "-" << " "; } } cout << endl; } cout << endl; std::this_thread::sleep_for(std::chrono::milliseconds(100)); system("cls"); } void populateArray(int* cellArray, int arraySize) { for (int i = 0; i < arraySize; i++) { cellArray[i] = rand() % 2; } } __device__ int getX(int i, int width) { return i % width; } __device__ int getY(int i, int width) { return i / width; } __device__ int getI(int m, int n, int width) { return m * width + n; } //Gets the neigbour cells via von Neuman Neigbourhood __device__ int getNeigbours(int m, int n, int* cells, int width, int height) { int neigbours = 0; for (int i = m - 1; i <= m + 1; i++) { for (int j = n - 1; j <= n + 1; j++) { if (i >= 0 && i < height && j >= 0 && j < width) { neigbours += cells[getI(i, j, width)]; } else { neigbours += cells[getI((i + height) % height, (j + width) % width, width)]; } } } return neigbours; } // rules that determines the state of the cell __device__ int rules(int neigbours, int state) { int n = neigbours - state; if (state == 1) { if (n > 1 && n < 4) { return 1; } else { return 0; } } else { if (n == 3){ return 1; } return 0; } } // creates the new state of the world __global__ void evolve(int* cells, const int height, const int width, const int arraySize, const int cellsPerThread) { extern __shared__ int sharedCells[]; int i = threadIdx.x + blockIdx.x * blockDim.x; for (int k = i * cellsPerThread; k < ((i + 1) * cellsPerThread); k++) { sharedCells[k] = cells[k]; int x, y, neigbours; x = getX(k, width); y = getY(k, width); neigbours = getNeigbours(y, x, sharedCells, width, height); cells[k] = rules(neigbours, sharedCells[getI(y, x, width)]); __syncthreads(); } } // Runs the simulation int main() { srand(1); const int height = 100, width = 100, arraySize = 10000, timeSteps = 10000, cellsPerThread = 10, gridSize = 10; char b; int* cells; // CPU int* cellsDev; // GPU cells = (int*)malloc(sizeof(int)*arraySize); // creating arrays populateArray(cells, arraySize); hipMalloc((void**)&cellsDev, sizeof(float)*arraySize); // creating space on gpu hipMemcpy(cellsDev, cells, sizeof(int)*arraySize, hipMemcpyHostToDevice); // copying arrays to gpu clock_t begin = clock(); for (int i = 1; i < timeSteps; i++) { hipLaunchKernelGGL(( evolve) , dim3(gridSize), dim3(arraySize / cellsPerThread / gridSize) , 0, 0, cellsDev, height, width, arraySize, cellsPerThread); // running evolution iteration } clock_t end = clock(); hipMemcpy(cells, cellsDev, sizeof(int)*arraySize, hipMemcpyDeviceToHost); // copying cells back from gpu to cpu hipFree(cellsDev); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout << elapsed_secs; ofstream myfile; myfile.open("para4.txt"); for (int i = 0; i < arraySize; i++) { myfile << cells[i] << endl; } free(cells); myfile.close(); cin >> b; return 0; }
4f144d70afd9b6ba7cd047eaac15463b29fd972a.cu
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include "iostream"
#include "stdlib.h"
#include <thread>   // std::this_thread::sleep_for
#include <chrono>   // std::chrono::seconds
#include "time.h"
#include <ctime>
#include "fstream"

using namespace std;

int getPos(int m, int n, const int width)
{
    return m * width + n;
}

// Prints the current state of the world to the console.
void printCells(int* cells, int const height, int const width)
{
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            if (cells[getPos(i, j, width)] == 1) {
                cout << "O" << " ";
            }
            else {
                cout << "-" << " ";
            }
        }
        cout << endl;
    }
    cout << endl;
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    system("cls");
}

// Fills the world with a random initial population.
void populateArray(int* cellArray, int arraySize)
{
    for (int i = 0; i < arraySize; i++) {
        cellArray[i] = rand() % 2;
    }
}

__device__ int getX(int i, int width) { return i % width; }

__device__ int getY(int i, int width) { return i / width; }

__device__ int getI(int m, int n, int width) { return m * width + n; }

// Counts the live cells in the 3x3 Moore neighbourhood, wrapping at the borders
__device__ int getNeigbours(int m, int n, int* cells, int width, int height)
{
    int neigbours = 0;
    for (int i = m - 1; i <= m + 1; i++) {
        for (int j = n - 1; j <= n + 1; j++) {
            if (i >= 0 && i < height && j >= 0 && j < width) {
                neigbours += cells[getI(i, j, width)];
            }
            else {
                neigbours += cells[getI((i + height) % height, (j + width) % width, width)];
            }
        }
    }
    return neigbours;
}

// Rules that determine the next state of a cell
__device__ int rules(int neigbours, int state)
{
    int n = neigbours - state;
    if (state == 1) {
        if (n > 1 && n < 4) {
            return 1;
        }
        else {
            return 0;
        }
    }
    else {
        if (n == 3) {
            return 1;
        }
        return 0;
    }
}

// Creates the new state of the world
__global__ void evolve(int* cells, const int height, const int width, const int arraySize, const int cellsPerThread)
{
    extern __shared__ int sharedCells[];
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    for (int k = i * cellsPerThread; k < ((i + 1) * cellsPerThread); k++) {
        sharedCells[k] = cells[k];
        int x, y, neigbours;
        x = getX(k, width);
        y = getY(k, width);
        neigbours = getNeigbours(y, x, sharedCells, width, height);
        cells[k] = rules(neigbours, sharedCells[getI(y, x, width)]);
        __syncthreads();
    }
}

// Runs the simulation
int main()
{
    srand(1);
    const int height = 100, width = 100, arraySize = 10000, timeSteps = 10000, cellsPerThread = 10, gridSize = 10;
    char b;
    int* cells;     // CPU
    int* cellsDev;  // GPU

    cells = (int*)malloc(sizeof(int) * arraySize);                                // creating arrays
    populateArray(cells, arraySize);
    cudaMalloc((void**)&cellsDev, sizeof(int) * arraySize);                       // creating space on gpu
    cudaMemcpy(cellsDev, cells, sizeof(int) * arraySize, cudaMemcpyHostToDevice); // copying arrays to gpu

    clock_t begin = clock();
    for (int i = 1; i < timeSteps; i++) {
        // running evolution iteration; evolve uses extern __shared__, so the dynamic shared-memory size is supplied here
        evolve<<<gridSize, arraySize / cellsPerThread / gridSize, sizeof(int) * arraySize>>>(cellsDev, height, width, arraySize, cellsPerThread);
    }
    clock_t end = clock();

    cudaMemcpy(cells, cellsDev, sizeof(int) * arraySize, cudaMemcpyDeviceToHost); // copying cells back from gpu to cpu
    cudaFree(cellsDev);

    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << elapsed_secs;

    ofstream myfile;
    myfile.open("para4.txt");
    for (int i = 0; i < arraySize; i++) {
        myfile << cells[i] << endl;
    }
    free(cells);
    myfile.close();
    cin >> b;
    return 0;
}
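The program above never inspects launch or runtime errors, so a failing evolve launch would go unnoticed. The following is a minimal, illustrative sketch (not part of the original file pair) of one conventional way to surface such errors, assuming the same variable names (cellsDev, gridSize, cellsPerThread, arraySize) and the launch shown above; the helper name checkCuda is an assumption for illustration.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Hypothetical helper: aborts with a readable message if a CUDA call failed.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Inside the time-step loop, after each launch:
//   evolve<<<gridSize, arraySize / cellsPerThread / gridSize, sizeof(int) * arraySize>>>(
//       cellsDev, height, width, arraySize, cellsPerThread);
//   checkCuda(cudaGetLastError(), "evolve launch");          // configuration / launch errors
//   checkCuda(cudaDeviceSynchronize(), "evolve execution");  // errors raised while the kernel runs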
45cd019fce4cbbdd7d980025c0f88baaffc52607.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Converts a double-precision array to single precision, one element per thread.
__global__ void doubleArray2floatArray(const double* doubleArray, float* floatArray, const int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size) {
        floatArray[i] = (float) doubleArray[i];
    }
}
45cd019fce4cbbdd7d980025c0f88baaffc52607.cu
#include "includes.h"

// Converts a double-precision array to single precision, one element per thread.
__global__ void doubleArray2floatArray(const double* doubleArray, float* floatArray, const int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size) {
        floatArray[i] = (float) doubleArray[i];
    }
}
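The file above contains only the device kernel, so a minimal host-side sketch of how it might be driven is given here for illustration. It is not part of the original source: the function name convertOnDevice, the buffer names (d_double, d_float), and the block size of 256 are assumptions.

#include <cuda_runtime.h>
#include <vector>

// Hypothetical driver: copies n doubles to the device, converts them to float
// with doubleArray2floatArray, and copies the result back to the host.
void convertOnDevice(const std::vector<double>& h_double, std::vector<float>& h_float)
{
    const int n = (int)h_double.size();
    h_float.resize(n);

    double* d_double = nullptr;
    float*  d_float  = nullptr;
    cudaMalloc((void**)&d_double, n * sizeof(double));
    cudaMalloc((void**)&d_float,  n * sizeof(float));
    cudaMemcpy(d_double, h_double.data(), n * sizeof(double), cudaMemcpyHostToDevice);

    const int block = 256;                      // assumed block size
    const int grid  = (n + block - 1) / block;  // enough blocks to cover n elements
    doubleArray2floatArray<<<grid, block>>>(d_double, d_float, n);

    cudaMemcpy(h_float.data(), d_float, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_double);
    cudaFree(d_float);
}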
96f0e9784b8d854e0feb8ca74efcd001791f40de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "box3d3r-16x16-1-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 10; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_1_5; float __reg_1_6; __shared__ float __a_sb_double[__blockSize * 2]; float *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = 
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.176f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -3, -3)))) + (0.0020f * (__SBREF(__a_sb, -3, -2)))) + (0.0030f * (__SBREF(__a_sb, -3, -1)))) + (0.0040f * (__SBREF(__a_sb, -3, 0)))) + (0.0050f * (__SBREF(__a_sb, -3, 1)))) + (0.0060f * (__SBREF(__a_sb, -3, 2)))) + (0.0070f * (__SBREF(__a_sb, -3, 3)))) + (0.0080f * (__SBREF(__a_sb, -2, -3)))) + (0.0090f * (__SBREF(__a_sb, -2, -2)))) + (0.0100f * (__SBREF(__a_sb, -2, -1)))) + (0.0110f * (__SBREF(__a_sb, -2, 0)))) + (0.0120f * (__SBREF(__a_sb, -2, 1)))) + (0.0130f * (__SBREF(__a_sb, -2, 2)))) + (0.0140f * (__SBREF(__a_sb, -2, 3)))) + (0.0150f * (__SBREF(__a_sb, -1, -3)))) + (0.0160f * (__SBREF(__a_sb, -1, -2)))) + (0.0170f * (__SBREF(__a_sb, -1, -1)))) + (0.0180f * (__SBREF(__a_sb, -1, 0)))) + (0.0190f * (__SBREF(__a_sb, -1, 1)))) + (0.0200f * (__SBREF(__a_sb, -1, 2)))) + (0.0210f * (__SBREF(__a_sb, -1, 3)))) + (0.0220f * (__SBREF(__a_sb, 0, -3)))) + (0.0230f * (__SBREF(__a_sb, 0, -2)))) + (0.0240f * (__SBREF(__a_sb, 0, -1)))) + (0.0250f * (__SBREF(__a_sb, 0, 1)))) + (0.0260f * (__SBREF(__a_sb, 0, 2)))) + (0.0270f * (__SBREF(__a_sb, 0, 3)))) + (0.0280f * (__SBREF(__a_sb, 1, -3)))) + (0.0290f * (__SBREF(__a_sb, 1, -2)))) + (0.0300f * (__SBREF(__a_sb, 1, -1)))) + (0.0310f * (__SBREF(__a_sb, 1, 0)))) + (0.0320f * (__SBREF(__a_sb, 1, 1)))) + (0.0330f * (__SBREF(__a_sb, 1, 2)))) + (0.0340f * (__SBREF(__a_sb, 1, 3)))) + (0.0350f * (__SBREF(__a_sb, 2, -3)))) + (0.0360f * (__SBREF(__a_sb, 2, -2)))) + (0.0370f * (__SBREF(__a_sb, 2, -1)))) + (0.0380f * (__SBREF(__a_sb, 2, 0)))) + (0.0390f * (__SBREF(__a_sb, 2, 1)))) + (0.0400f * (__SBREF(__a_sb, 2, 2)))) + (0.0410f * (__SBREF(__a_sb, 2, 3)))) + (0.0420f * (__SBREF(__a_sb, 3, -3)))) + (0.0430f * (__SBREF(__a_sb, 3, -2)))) + (0.0440f * (__SBREF(__a_sb, 3, -1)))) + (0.0450f * (__SBREF(__a_sb, 3, 0)))) + (0.0460f * (__SBREF(__a_sb, 3, 1)))) + (0.0470f * (__SBREF(__a_sb, 3, 2)))) + (0.0480f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1808f * (__REGREF(__a, 0, 0)))) - (0.0011f * (__SBREF(__a_sb, -3, -3)))) - (0.0021f * (__SBREF(__a_sb, -3, -2)))) - (0.0031f * (__SBREF(__a_sb, -3, -1)))) - (0.0041f * (__SBREF(__a_sb, -3, 0)))) - (0.0051f * (__SBREF(__a_sb, -3, 1)))) - (0.0061f * (__SBREF(__a_sb, -3, 2)))) - (0.0071f * (__SBREF(__a_sb, -3, 3)))) - (0.0081f * (__SBREF(__a_sb, -2, -3)))) - (0.0091f * (__SBREF(__a_sb, -2, -2)))) - (0.0101f * (__SBREF(__a_sb, -2, -1)))) - (0.0111f * (__SBREF(__a_sb, -2, 0)))) - (0.0121f * (__SBREF(__a_sb, -2, 1)))) - (0.0131f * (__SBREF(__a_sb, -2, 2)))) - (0.0141f * (__SBREF(__a_sb, -2, 3)))) - (0.0151f * (__SBREF(__a_sb, -1, -3)))) - (0.0161f * (__SBREF(__a_sb, -1, -2)))) - (0.0171f * (__SBREF(__a_sb, -1, -1)))) - (0.0181f * (__SBREF(__a_sb, -1, 0)))) - (0.0191f * (__SBREF(__a_sb, -1, 1)))) - (0.0201f * (__SBREF(__a_sb, -1, 2)))) - (0.0211f * (__SBREF(__a_sb, -1, 3)))) - (0.0221f * (__SBREF(__a_sb, 0, -3)))) - (0.0231f * (__SBREF(__a_sb, 0, -2)))) - (0.0241f * (__SBREF(__a_sb, 0, -1)))) - (0.0251f * (__SBREF(__a_sb, 0, 1)))) - (0.0261f * (__SBREF(__a_sb, 0, 2)))) - (0.0271f * (__SBREF(__a_sb, 0, 3)))) - (0.0281f * (__SBREF(__a_sb, 1, -3)))) - (0.0291f * (__SBREF(__a_sb, 1, -2)))) - (0.0301f * (__SBREF(__a_sb, 1, -1)))) - (0.0311f * (__SBREF(__a_sb, 1, 0)))) - (0.0321f * (__SBREF(__a_sb, 1, 1)))) - (0.0331f * (__SBREF(__a_sb, 1, 2)))) - (0.0341f * (__SBREF(__a_sb, 1, 3)))) - (0.0351f * (__SBREF(__a_sb, 2, -3)))) - (0.0361f * (__SBREF(__a_sb, 2, -2)))) - (0.0371f * (__SBREF(__a_sb, 2, -1)))) - (0.0381f * (__SBREF(__a_sb, 2, 0)))) - (0.0391f * (__SBREF(__a_sb, 2, 1)))) - (0.0401f * (__SBREF(__a_sb, 2, 2)))) - (0.0411f * (__SBREF(__a_sb, 2, 3)))) - (0.0421f * (__SBREF(__a_sb, 3, -3)))) - (0.0431f * (__SBREF(__a_sb, 3, -2)))) - (0.0441f * (__SBREF(__a_sb, 3, -1)))) - (0.0451f * (__SBREF(__a_sb, 3, 0)))) - (0.0461f * (__SBREF(__a_sb, 3, 1)))) - (0.0471f * (__SBREF(__a_sb, 3, 2)))) - (0.0481f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1856f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -3, -3)))) + (0.0022f * (__SBREF(__a_sb, -3, -2)))) + (0.0032f * (__SBREF(__a_sb, -3, -1)))) + (0.0042f * (__SBREF(__a_sb, -3, 0)))) + (0.0052f * (__SBREF(__a_sb, -3, 1)))) + (0.0062f * (__SBREF(__a_sb, -3, 2)))) + (0.0072f * (__SBREF(__a_sb, -3, 3)))) + (0.0082f * (__SBREF(__a_sb, -2, -3)))) + (0.0092f * (__SBREF(__a_sb, -2, -2)))) + (0.0102f * (__SBREF(__a_sb, -2, -1)))) + (0.0112f * (__SBREF(__a_sb, -2, 0)))) + (0.0122f * (__SBREF(__a_sb, -2, 1)))) + (0.0132f * (__SBREF(__a_sb, -2, 2)))) + (0.0142f * (__SBREF(__a_sb, -2, 3)))) + (0.0152f * (__SBREF(__a_sb, -1, -3)))) + (0.0162f * (__SBREF(__a_sb, -1, -2)))) + (0.0172f * (__SBREF(__a_sb, -1, -1)))) + (0.0182f * (__SBREF(__a_sb, -1, 0)))) + (0.0192f * (__SBREF(__a_sb, -1, 1)))) + (0.0202f * (__SBREF(__a_sb, -1, 2)))) + (0.0212f * (__SBREF(__a_sb, -1, 3)))) + (0.0222f * (__SBREF(__a_sb, 0, -3)))) + (0.0232f * (__SBREF(__a_sb, 0, -2)))) + (0.0242f * (__SBREF(__a_sb, 0, -1)))) + (0.0252f * (__SBREF(__a_sb, 0, 1)))) + (0.0262f * (__SBREF(__a_sb, 0, 2)))) + (0.0272f * (__SBREF(__a_sb, 0, 3)))) + (0.0282f * (__SBREF(__a_sb, 1, -3)))) + (0.0292f * (__SBREF(__a_sb, 1, -2)))) + (0.0302f * (__SBREF(__a_sb, 1, -1)))) + (0.0312f * (__SBREF(__a_sb, 1, 0)))) + (0.0322f * (__SBREF(__a_sb, 1, 1)))) + (0.0332f * (__SBREF(__a_sb, 1, 2)))) + (0.0342f * (__SBREF(__a_sb, 1, 3)))) + (0.0352f * (__SBREF(__a_sb, 2, -3)))) + (0.0362f * (__SBREF(__a_sb, 2, -2)))) + (0.0372f * (__SBREF(__a_sb, 2, -1)))) + (0.0382f * (__SBREF(__a_sb, 2, 0)))) + (0.0392f * (__SBREF(__a_sb, 2, 1)))) + (0.0402f * (__SBREF(__a_sb, 2, 2)))) + (0.0412f * (__SBREF(__a_sb, 2, 3)))) + (0.0422f * (__SBREF(__a_sb, 3, -3)))) + (0.0432f * (__SBREF(__a_sb, 3, -2)))) + (0.0442f * (__SBREF(__a_sb, 3, -1)))) + (0.0452f * (__SBREF(__a_sb, 3, 0)))) + (0.0462f * (__SBREF(__a_sb, 3, 1)))) + (0.0472f * (__SBREF(__a_sb, 3, 2)))) + (0.0482f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1904f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -3, -3)))) + (0.0023f * (__SBREF(__a_sb, -3, -2)))) + (0.0033f * (__SBREF(__a_sb, -3, -1)))) + (0.0043f * (__SBREF(__a_sb, -3, 0)))) + (0.0053f * (__SBREF(__a_sb, -3, 1)))) + (0.0063f * (__SBREF(__a_sb, -3, 2)))) + (0.0073f * (__SBREF(__a_sb, -3, 3)))) + (0.0083f * (__SBREF(__a_sb, -2, -3)))) + (0.0093f * (__SBREF(__a_sb, -2, -2)))) + (0.0103f * (__SBREF(__a_sb, -2, -1)))) + (0.0113f * (__SBREF(__a_sb, -2, 0)))) + (0.0123f * (__SBREF(__a_sb, -2, 1)))) + (0.0133f * (__SBREF(__a_sb, -2, 2)))) + (0.0143f * (__SBREF(__a_sb, -2, 3)))) + (0.0153f * (__SBREF(__a_sb, -1, -3)))) + (0.0163f * (__SBREF(__a_sb, -1, -2)))) + (0.0173f * (__SBREF(__a_sb, -1, -1)))) + (0.0183f * (__SBREF(__a_sb, -1, 0)))) + (0.0193f * (__SBREF(__a_sb, -1, 1)))) + (0.0203f * (__SBREF(__a_sb, -1, 2)))) + (0.0213f * (__SBREF(__a_sb, -1, 3)))) + (0.0223f * (__SBREF(__a_sb, 0, -3)))) + (0.0233f * (__SBREF(__a_sb, 0, -2)))) + (0.0243f * (__SBREF(__a_sb, 0, -1)))) + (0.0253f * (__SBREF(__a_sb, 0, 1)))) + (0.0263f * (__SBREF(__a_sb, 0, 2)))) + (0.0273f * (__SBREF(__a_sb, 0, 3)))) + (0.0283f * (__SBREF(__a_sb, 1, -3)))) + (0.0293f * (__SBREF(__a_sb, 1, -2)))) + (0.0303f * (__SBREF(__a_sb, 1, -1)))) + (0.0313f * (__SBREF(__a_sb, 1, 0)))) + (0.0323f * (__SBREF(__a_sb, 1, 1)))) + (0.0333f * (__SBREF(__a_sb, 1, 2)))) + (0.0343f * (__SBREF(__a_sb, 1, 3)))) + (0.0353f * (__SBREF(__a_sb, 2, -3)))) + (0.0363f * (__SBREF(__a_sb, 2, -2)))) + (0.0373f * (__SBREF(__a_sb, 2, -1)))) + (0.0383f * (__SBREF(__a_sb, 2, 0)))) + (0.0393f * (__SBREF(__a_sb, 2, 1)))) + (0.0403f * (__SBREF(__a_sb, 2, 2)))) + (0.0413f * (__SBREF(__a_sb, 2, 3)))) + (0.0423f * (__SBREF(__a_sb, 3, -3)))) + (0.0433f * (__SBREF(__a_sb, 3, -2)))) + (0.0443f * (__SBREF(__a_sb, 3, -1)))) + (0.0453f * (__SBREF(__a_sb, 3, 0)))) + (0.0463f * (__SBREF(__a_sb, 3, 1)))) + (0.0473f * (__SBREF(__a_sb, 3, 2)))) + (0.0483f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1952f * (__REGREF(__a, 0, 0)))) - (0.0014f * (__SBREF(__a_sb, -3, -3)))) - (0.0024f * (__SBREF(__a_sb, -3, -2)))) - (0.0034f * (__SBREF(__a_sb, -3, -1)))) - (0.0044f * (__SBREF(__a_sb, -3, 0)))) - (0.0054f * (__SBREF(__a_sb, -3, 1)))) - (0.0064f * (__SBREF(__a_sb, -3, 2)))) - (0.0074f * (__SBREF(__a_sb, -3, 3)))) - (0.0084f * (__SBREF(__a_sb, -2, -3)))) - (0.0094f * (__SBREF(__a_sb, -2, -2)))) - (0.0104f * (__SBREF(__a_sb, -2, -1)))) - (0.0114f * (__SBREF(__a_sb, -2, 0)))) - (0.0124f * (__SBREF(__a_sb, -2, 1)))) - (0.0134f * (__SBREF(__a_sb, -2, 2)))) - (0.0144f * (__SBREF(__a_sb, -2, 3)))) - (0.0154f * (__SBREF(__a_sb, -1, -3)))) - (0.0164f * (__SBREF(__a_sb, -1, -2)))) - (0.0174f * (__SBREF(__a_sb, -1, -1)))) - (0.0184f * (__SBREF(__a_sb, -1, 0)))) - (0.0194f * (__SBREF(__a_sb, -1, 1)))) - (0.0204f * (__SBREF(__a_sb, -1, 2)))) - (0.0214f * (__SBREF(__a_sb, -1, 3)))) - (0.0224f * (__SBREF(__a_sb, 0, -3)))) - (0.0234f * (__SBREF(__a_sb, 0, -2)))) - (0.0244f * (__SBREF(__a_sb, 0, -1)))) - (0.0254f * (__SBREF(__a_sb, 0, 1)))) - (0.0264f * (__SBREF(__a_sb, 0, 2)))) - (0.0274f * (__SBREF(__a_sb, 0, 3)))) - (0.0284f * (__SBREF(__a_sb, 1, -3)))) - (0.0294f * (__SBREF(__a_sb, 1, -2)))) - (0.0304f * (__SBREF(__a_sb, 1, -1)))) - (0.0314f * (__SBREF(__a_sb, 1, 0)))) - (0.0324f * (__SBREF(__a_sb, 1, 1)))) - (0.0334f * (__SBREF(__a_sb, 1, 2)))) - (0.0344f * (__SBREF(__a_sb, 1, 3)))) - (0.0354f * (__SBREF(__a_sb, 2, -3)))) - (0.0364f * (__SBREF(__a_sb, 2, -2)))) - (0.0374f * (__SBREF(__a_sb, 2, -1)))) - (0.0384f * (__SBREF(__a_sb, 2, 0)))) - (0.0394f * (__SBREF(__a_sb, 2, 1)))) - (0.0404f * (__SBREF(__a_sb, 2, 2)))) - (0.0414f * (__SBREF(__a_sb, 2, 3)))) - (0.0424f * (__SBREF(__a_sb, 3, -3)))) - (0.0434f * (__SBREF(__a_sb, 3, -2)))) - (0.0444f * (__SBREF(__a_sb, 3, -1)))) - (0.0454f * (__SBREF(__a_sb, 3, 0)))) - (0.0464f * (__SBREF(__a_sb, 3, 1)))) - (0.0474f * (__SBREF(__a_sb, 3, 2)))) - (0.0484f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-((-0.300f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -3, -3)))) + (0.0025f * (__SBREF(__a_sb, -3, -2)))) + (0.0035f * (__SBREF(__a_sb, -3, -1)))) + (0.0045f * (__SBREF(__a_sb, -3, 0)))) + (0.0055f * (__SBREF(__a_sb, -3, 1)))) + (0.0065f * (__SBREF(__a_sb, -3, 2)))) + (0.0075f * (__SBREF(__a_sb, -3, 3)))) + (0.0085f * (__SBREF(__a_sb, -2, -3)))) + (0.0095f * (__SBREF(__a_sb, -2, -2)))) + (0.0105f * (__SBREF(__a_sb, -2, -1)))) + (0.0115f * (__SBREF(__a_sb, -2, 0)))) + (0.0125f * (__SBREF(__a_sb, -2, 1)))) + (0.0135f * (__SBREF(__a_sb, -2, 2)))) + (0.0145f * (__SBREF(__a_sb, -2, 3)))) + (0.0155f * (__SBREF(__a_sb, -1, -3)))) + (0.0165f * (__SBREF(__a_sb, -1, -2)))) + (0.0175f * (__SBREF(__a_sb, -1, -1)))) + (0.0185f * (__SBREF(__a_sb, -1, 0)))) + (0.0195f * (__SBREF(__a_sb, -1, 1)))) + (0.0205f * (__SBREF(__a_sb, -1, 2)))) + (0.0215f * (__SBREF(__a_sb, -1, 3)))) + (0.0225f * (__SBREF(__a_sb, 0, -3)))) + (0.0235f * (__SBREF(__a_sb, 0, -2)))) + (0.0245f * (__SBREF(__a_sb, 0, -1)))) + (0.0255f * (__SBREF(__a_sb, 0, 1)))) + (0.0265f * (__SBREF(__a_sb, 0, 2)))) + (0.0275f * (__SBREF(__a_sb, 0, 3)))) + (0.0285f * (__SBREF(__a_sb, 1, -3)))) + (0.0295f * (__SBREF(__a_sb, 1, -2)))) + (0.0305f * (__SBREF(__a_sb, 1, -1)))) + (0.0315f * (__SBREF(__a_sb, 1, 0)))) + (0.0325f * (__SBREF(__a_sb, 1, 1)))) + (0.0335f * (__SBREF(__a_sb, 1, 2)))) + (0.0345f * (__SBREF(__a_sb, 1, 3)))) + (0.0355f * (__SBREF(__a_sb, 2, -3)))) + (0.0365f * (__SBREF(__a_sb, 2, -2)))) + (0.0375f * (__SBREF(__a_sb, 2, -1)))) + (0.0385f * (__SBREF(__a_sb, 2, 0)))) + (0.0395f * (__SBREF(__a_sb, 2, 1)))) + (0.0405f * (__SBREF(__a_sb, 2, 2)))) + (0.0415f * (__SBREF(__a_sb, 2, 3)))) + (0.0425f * (__SBREF(__a_sb, 3, -3)))) + (0.0435f * (__SBREF(__a_sb, 3, -2)))) + (0.0445f * (__SBREF(__a_sb, 3, -1)))) + (0.0455f * (__SBREF(__a_sb, 3, 0)))) + (0.0465f * (__SBREF(__a_sb, 3, 1)))) + (0.0475f * (__SBREF(__a_sb, 3, 2)))) + (0.1485f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_5(out, a) do { float etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((0.2048f * (__REGREF(__a, 0, 0)))) - (0.0016f * (__SBREF(__a_sb, -3, -3)))) - (0.0026f * (__SBREF(__a_sb, -3, -2)))) - (0.0036f * (__SBREF(__a_sb, -3, -1)))) - (0.0046f * (__SBREF(__a_sb, -3, 0)))) - (0.0056f * (__SBREF(__a_sb, -3, 1)))) - (0.0066f * (__SBREF(__a_sb, -3, 2)))) - (0.0076f * (__SBREF(__a_sb, -3, 3)))) - (0.0086f * (__SBREF(__a_sb, -2, -3)))) - (0.0096f * (__SBREF(__a_sb, -2, -2)))) - (0.0106f * (__SBREF(__a_sb, -2, -1)))) - (0.0116f * (__SBREF(__a_sb, -2, 0)))) - (0.0126f * (__SBREF(__a_sb, -2, 1)))) - (0.0136f * (__SBREF(__a_sb, -2, 2)))) - (0.0146f * (__SBREF(__a_sb, -2, 3)))) - (0.0156f * (__SBREF(__a_sb, -1, -3)))) - (0.0166f * (__SBREF(__a_sb, -1, -2)))) - (0.0176f * (__SBREF(__a_sb, -1, -1)))) - (0.0186f * (__SBREF(__a_sb, -1, 0)))) - (0.0196f * (__SBREF(__a_sb, -1, 1)))) - (0.0206f * (__SBREF(__a_sb, -1, 2)))) - (0.0216f * (__SBREF(__a_sb, -1, 3)))) - (0.0226f * (__SBREF(__a_sb, 0, -3)))) - (0.0236f * (__SBREF(__a_sb, 0, -2)))) - (0.0246f * (__SBREF(__a_sb, 0, -1)))) - (0.0256f * (__SBREF(__a_sb, 0, 1)))) - (0.0266f * (__SBREF(__a_sb, 0, 2)))) - (0.0276f * (__SBREF(__a_sb, 0, 3)))) - (0.0286f * (__SBREF(__a_sb, 1, -3)))) - (0.0296f * (__SBREF(__a_sb, 1, -2)))) - (0.0306f * (__SBREF(__a_sb, 1, -1)))) - (0.0316f * (__SBREF(__a_sb, 1, 0)))) - (0.0326f * (__SBREF(__a_sb, 1, 1)))) - (0.0336f * (__SBREF(__a_sb, 1, 2)))) - (0.0346f * (__SBREF(__a_sb, 1, 3)))) - (0.0356f * (__SBREF(__a_sb, 2, -3)))) - (0.0366f * (__SBREF(__a_sb, 2, -2)))) - (0.0376f * (__SBREF(__a_sb, 2, -1)))) - (0.0386f * (__SBREF(__a_sb, 2, 0)))) - (0.0396f * (__SBREF(__a_sb, 2, 1)))) - (0.0406f * (__SBREF(__a_sb, 2, 2)))) - (0.0416f * (__SBREF(__a_sb, 2, 3)))) - (0.0426f * (__SBREF(__a_sb, 3, -3)))) - (0.0436f * (__SBREF(__a_sb, 3, -2)))) - (0.0446f * (__SBREF(__a_sb, 3, -1)))) - (0.0456f * (__SBREF(__a_sb, 3, 0)))) - (0.0466f * (__SBREF(__a_sb, 3, 1)))) - (0.0476f * (__SBREF(__a_sb, 3, 2)))) - (0.0486f * (__SBREF(__a_sb, 3, 3)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_6(out, a) do { float etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg); } else out3 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(3, __reg_1_3); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(3, __reg_1_3); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 10;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 3, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 3, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 3, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 3, __reg_1_2); __h++; __LOAD(__reg_0, 
__h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 3, __reg_1_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); } else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); 
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); } else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); } else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); } } else { for (__h = 7; __h <= __side1LenOl - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, 
__reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 3, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 3, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 3, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 3, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 3, __reg_1_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 3, __reg_1_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 3, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 3, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 3, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 3, __reg_1_3); __h++; } }
96f0e9784b8d854e0feb8ca74efcd001791f40de.cu
#include "box3d3r-16x16-1-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 10; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; float __reg_0; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_1_5; float __reg_1_6; __shared__ float __a_sb_double[__blockSize * 2]; float *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.176f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -3, -3)))) 
+ (0.0020f * (__SBREF(__a_sb, -3, -2)))) + (0.0030f * (__SBREF(__a_sb, -3, -1)))) + (0.0040f * (__SBREF(__a_sb, -3, 0)))) + (0.0050f * (__SBREF(__a_sb, -3, 1)))) + (0.0060f * (__SBREF(__a_sb, -3, 2)))) + (0.0070f * (__SBREF(__a_sb, -3, 3)))) + (0.0080f * (__SBREF(__a_sb, -2, -3)))) + (0.0090f * (__SBREF(__a_sb, -2, -2)))) + (0.0100f * (__SBREF(__a_sb, -2, -1)))) + (0.0110f * (__SBREF(__a_sb, -2, 0)))) + (0.0120f * (__SBREF(__a_sb, -2, 1)))) + (0.0130f * (__SBREF(__a_sb, -2, 2)))) + (0.0140f * (__SBREF(__a_sb, -2, 3)))) + (0.0150f * (__SBREF(__a_sb, -1, -3)))) + (0.0160f * (__SBREF(__a_sb, -1, -2)))) + (0.0170f * (__SBREF(__a_sb, -1, -1)))) + (0.0180f * (__SBREF(__a_sb, -1, 0)))) + (0.0190f * (__SBREF(__a_sb, -1, 1)))) + (0.0200f * (__SBREF(__a_sb, -1, 2)))) + (0.0210f * (__SBREF(__a_sb, -1, 3)))) + (0.0220f * (__SBREF(__a_sb, 0, -3)))) + (0.0230f * (__SBREF(__a_sb, 0, -2)))) + (0.0240f * (__SBREF(__a_sb, 0, -1)))) + (0.0250f * (__SBREF(__a_sb, 0, 1)))) + (0.0260f * (__SBREF(__a_sb, 0, 2)))) + (0.0270f * (__SBREF(__a_sb, 0, 3)))) + (0.0280f * (__SBREF(__a_sb, 1, -3)))) + (0.0290f * (__SBREF(__a_sb, 1, -2)))) + (0.0300f * (__SBREF(__a_sb, 1, -1)))) + (0.0310f * (__SBREF(__a_sb, 1, 0)))) + (0.0320f * (__SBREF(__a_sb, 1, 1)))) + (0.0330f * (__SBREF(__a_sb, 1, 2)))) + (0.0340f * (__SBREF(__a_sb, 1, 3)))) + (0.0350f * (__SBREF(__a_sb, 2, -3)))) + (0.0360f * (__SBREF(__a_sb, 2, -2)))) + (0.0370f * (__SBREF(__a_sb, 2, -1)))) + (0.0380f * (__SBREF(__a_sb, 2, 0)))) + (0.0390f * (__SBREF(__a_sb, 2, 1)))) + (0.0400f * (__SBREF(__a_sb, 2, 2)))) + (0.0410f * (__SBREF(__a_sb, 2, 3)))) + (0.0420f * (__SBREF(__a_sb, 3, -3)))) + (0.0430f * (__SBREF(__a_sb, 3, -2)))) + (0.0440f * (__SBREF(__a_sb, 3, -1)))) + (0.0450f * (__SBREF(__a_sb, 3, 0)))) + (0.0460f * (__SBREF(__a_sb, 3, 1)))) + (0.0470f * (__SBREF(__a_sb, 3, 2)))) + (0.0480f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1808f * (__REGREF(__a, 0, 0)))) - (0.0011f * (__SBREF(__a_sb, -3, -3)))) - (0.0021f * (__SBREF(__a_sb, -3, -2)))) - (0.0031f * (__SBREF(__a_sb, -3, -1)))) - (0.0041f * (__SBREF(__a_sb, -3, 0)))) - (0.0051f * (__SBREF(__a_sb, -3, 1)))) - (0.0061f * (__SBREF(__a_sb, -3, 2)))) - (0.0071f * (__SBREF(__a_sb, -3, 3)))) - (0.0081f * (__SBREF(__a_sb, -2, -3)))) - (0.0091f * (__SBREF(__a_sb, -2, -2)))) - (0.0101f * (__SBREF(__a_sb, -2, -1)))) - (0.0111f * (__SBREF(__a_sb, -2, 0)))) - (0.0121f * (__SBREF(__a_sb, -2, 1)))) - (0.0131f * (__SBREF(__a_sb, -2, 2)))) - (0.0141f * (__SBREF(__a_sb, -2, 3)))) - (0.0151f * (__SBREF(__a_sb, -1, -3)))) - (0.0161f * (__SBREF(__a_sb, -1, -2)))) - (0.0171f * (__SBREF(__a_sb, -1, -1)))) - (0.0181f * (__SBREF(__a_sb, -1, 0)))) - (0.0191f * (__SBREF(__a_sb, -1, 1)))) - (0.0201f * (__SBREF(__a_sb, -1, 2)))) - (0.0211f * (__SBREF(__a_sb, -1, 3)))) - (0.0221f * (__SBREF(__a_sb, 0, -3)))) - (0.0231f * (__SBREF(__a_sb, 0, -2)))) - (0.0241f * (__SBREF(__a_sb, 0, -1)))) - (0.0251f * (__SBREF(__a_sb, 0, 1)))) - (0.0261f * (__SBREF(__a_sb, 0, 2)))) - (0.0271f * (__SBREF(__a_sb, 0, 3)))) - (0.0281f * (__SBREF(__a_sb, 1, -3)))) - (0.0291f * (__SBREF(__a_sb, 1, -2)))) - (0.0301f * (__SBREF(__a_sb, 1, -1)))) - (0.0311f * (__SBREF(__a_sb, 1, 0)))) - (0.0321f * (__SBREF(__a_sb, 1, 1)))) - (0.0331f * (__SBREF(__a_sb, 1, 2)))) - (0.0341f * (__SBREF(__a_sb, 1, 3)))) - (0.0351f * (__SBREF(__a_sb, 2, -3)))) - (0.0361f * (__SBREF(__a_sb, 2, -2)))) - (0.0371f * (__SBREF(__a_sb, 2, -1)))) - (0.0381f * (__SBREF(__a_sb, 2, 0)))) - (0.0391f * (__SBREF(__a_sb, 2, 1)))) - (0.0401f * (__SBREF(__a_sb, 2, 2)))) - (0.0411f * (__SBREF(__a_sb, 2, 3)))) - (0.0421f * (__SBREF(__a_sb, 3, -3)))) - (0.0431f * (__SBREF(__a_sb, 3, -2)))) - (0.0441f * (__SBREF(__a_sb, 3, -1)))) - (0.0451f * (__SBREF(__a_sb, 3, 0)))) - (0.0461f * (__SBREF(__a_sb, 3, 1)))) - (0.0471f * (__SBREF(__a_sb, 3, 2)))) - (0.0481f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1856f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -3, -3)))) + (0.0022f * (__SBREF(__a_sb, -3, -2)))) + (0.0032f * (__SBREF(__a_sb, -3, -1)))) + (0.0042f * (__SBREF(__a_sb, -3, 0)))) + (0.0052f * (__SBREF(__a_sb, -3, 1)))) + (0.0062f * (__SBREF(__a_sb, -3, 2)))) + (0.0072f * (__SBREF(__a_sb, -3, 3)))) + (0.0082f * (__SBREF(__a_sb, -2, -3)))) + (0.0092f * (__SBREF(__a_sb, -2, -2)))) + (0.0102f * (__SBREF(__a_sb, -2, -1)))) + (0.0112f * (__SBREF(__a_sb, -2, 0)))) + (0.0122f * (__SBREF(__a_sb, -2, 1)))) + (0.0132f * (__SBREF(__a_sb, -2, 2)))) + (0.0142f * (__SBREF(__a_sb, -2, 3)))) + (0.0152f * (__SBREF(__a_sb, -1, -3)))) + (0.0162f * (__SBREF(__a_sb, -1, -2)))) + (0.0172f * (__SBREF(__a_sb, -1, -1)))) + (0.0182f * (__SBREF(__a_sb, -1, 0)))) + (0.0192f * (__SBREF(__a_sb, -1, 1)))) + (0.0202f * (__SBREF(__a_sb, -1, 2)))) + (0.0212f * (__SBREF(__a_sb, -1, 3)))) + (0.0222f * (__SBREF(__a_sb, 0, -3)))) + (0.0232f * (__SBREF(__a_sb, 0, -2)))) + (0.0242f * (__SBREF(__a_sb, 0, -1)))) + (0.0252f * (__SBREF(__a_sb, 0, 1)))) + (0.0262f * (__SBREF(__a_sb, 0, 2)))) + (0.0272f * (__SBREF(__a_sb, 0, 3)))) + (0.0282f * (__SBREF(__a_sb, 1, -3)))) + (0.0292f * (__SBREF(__a_sb, 1, -2)))) + (0.0302f * (__SBREF(__a_sb, 1, -1)))) + (0.0312f * (__SBREF(__a_sb, 1, 0)))) + (0.0322f * (__SBREF(__a_sb, 1, 1)))) + (0.0332f * (__SBREF(__a_sb, 1, 2)))) + (0.0342f * (__SBREF(__a_sb, 1, 3)))) + (0.0352f * (__SBREF(__a_sb, 2, -3)))) + (0.0362f * (__SBREF(__a_sb, 2, -2)))) + (0.0372f * (__SBREF(__a_sb, 2, -1)))) + (0.0382f * (__SBREF(__a_sb, 2, 0)))) + (0.0392f * (__SBREF(__a_sb, 2, 1)))) + (0.0402f * (__SBREF(__a_sb, 2, 2)))) + (0.0412f * (__SBREF(__a_sb, 2, 3)))) + (0.0422f * (__SBREF(__a_sb, 3, -3)))) + (0.0432f * (__SBREF(__a_sb, 3, -2)))) + (0.0442f * (__SBREF(__a_sb, 3, -1)))) + (0.0452f * (__SBREF(__a_sb, 3, 0)))) + (0.0462f * (__SBREF(__a_sb, 3, 1)))) + (0.0472f * (__SBREF(__a_sb, 3, 2)))) + (0.0482f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1904f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -3, -3)))) + (0.0023f * (__SBREF(__a_sb, -3, -2)))) + (0.0033f * (__SBREF(__a_sb, -3, -1)))) + (0.0043f * (__SBREF(__a_sb, -3, 0)))) + (0.0053f * (__SBREF(__a_sb, -3, 1)))) + (0.0063f * (__SBREF(__a_sb, -3, 2)))) + (0.0073f * (__SBREF(__a_sb, -3, 3)))) + (0.0083f * (__SBREF(__a_sb, -2, -3)))) + (0.0093f * (__SBREF(__a_sb, -2, -2)))) + (0.0103f * (__SBREF(__a_sb, -2, -1)))) + (0.0113f * (__SBREF(__a_sb, -2, 0)))) + (0.0123f * (__SBREF(__a_sb, -2, 1)))) + (0.0133f * (__SBREF(__a_sb, -2, 2)))) + (0.0143f * (__SBREF(__a_sb, -2, 3)))) + (0.0153f * (__SBREF(__a_sb, -1, -3)))) + (0.0163f * (__SBREF(__a_sb, -1, -2)))) + (0.0173f * (__SBREF(__a_sb, -1, -1)))) + (0.0183f * (__SBREF(__a_sb, -1, 0)))) + (0.0193f * (__SBREF(__a_sb, -1, 1)))) + (0.0203f * (__SBREF(__a_sb, -1, 2)))) + (0.0213f * (__SBREF(__a_sb, -1, 3)))) + (0.0223f * (__SBREF(__a_sb, 0, -3)))) + (0.0233f * (__SBREF(__a_sb, 0, -2)))) + (0.0243f * (__SBREF(__a_sb, 0, -1)))) + (0.0253f * (__SBREF(__a_sb, 0, 1)))) + (0.0263f * (__SBREF(__a_sb, 0, 2)))) + (0.0273f * (__SBREF(__a_sb, 0, 3)))) + (0.0283f * (__SBREF(__a_sb, 1, -3)))) + (0.0293f * (__SBREF(__a_sb, 1, -2)))) + (0.0303f * (__SBREF(__a_sb, 1, -1)))) + (0.0313f * (__SBREF(__a_sb, 1, 0)))) + (0.0323f * (__SBREF(__a_sb, 1, 1)))) + (0.0333f * (__SBREF(__a_sb, 1, 2)))) + (0.0343f * (__SBREF(__a_sb, 1, 3)))) + (0.0353f * (__SBREF(__a_sb, 2, -3)))) + (0.0363f * (__SBREF(__a_sb, 2, -2)))) + (0.0373f * (__SBREF(__a_sb, 2, -1)))) + (0.0383f * (__SBREF(__a_sb, 2, 0)))) + (0.0393f * (__SBREF(__a_sb, 2, 1)))) + (0.0403f * (__SBREF(__a_sb, 2, 2)))) + (0.0413f * (__SBREF(__a_sb, 2, 3)))) + (0.0423f * (__SBREF(__a_sb, 3, -3)))) + (0.0433f * (__SBREF(__a_sb, 3, -2)))) + (0.0443f * (__SBREF(__a_sb, 3, -1)))) + (0.0453f * (__SBREF(__a_sb, 3, 0)))) + (0.0463f * (__SBREF(__a_sb, 3, 1)))) + (0.0473f * (__SBREF(__a_sb, 3, 2)))) + (0.0483f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1952f * (__REGREF(__a, 0, 0)))) - (0.0014f * (__SBREF(__a_sb, -3, -3)))) - (0.0024f * (__SBREF(__a_sb, -3, -2)))) - (0.0034f * (__SBREF(__a_sb, -3, -1)))) - (0.0044f * (__SBREF(__a_sb, -3, 0)))) - (0.0054f * (__SBREF(__a_sb, -3, 1)))) - (0.0064f * (__SBREF(__a_sb, -3, 2)))) - (0.0074f * (__SBREF(__a_sb, -3, 3)))) - (0.0084f * (__SBREF(__a_sb, -2, -3)))) - (0.0094f * (__SBREF(__a_sb, -2, -2)))) - (0.0104f * (__SBREF(__a_sb, -2, -1)))) - (0.0114f * (__SBREF(__a_sb, -2, 0)))) - (0.0124f * (__SBREF(__a_sb, -2, 1)))) - (0.0134f * (__SBREF(__a_sb, -2, 2)))) - (0.0144f * (__SBREF(__a_sb, -2, 3)))) - (0.0154f * (__SBREF(__a_sb, -1, -3)))) - (0.0164f * (__SBREF(__a_sb, -1, -2)))) - (0.0174f * (__SBREF(__a_sb, -1, -1)))) - (0.0184f * (__SBREF(__a_sb, -1, 0)))) - (0.0194f * (__SBREF(__a_sb, -1, 1)))) - (0.0204f * (__SBREF(__a_sb, -1, 2)))) - (0.0214f * (__SBREF(__a_sb, -1, 3)))) - (0.0224f * (__SBREF(__a_sb, 0, -3)))) - (0.0234f * (__SBREF(__a_sb, 0, -2)))) - (0.0244f * (__SBREF(__a_sb, 0, -1)))) - (0.0254f * (__SBREF(__a_sb, 0, 1)))) - (0.0264f * (__SBREF(__a_sb, 0, 2)))) - (0.0274f * (__SBREF(__a_sb, 0, 3)))) - (0.0284f * (__SBREF(__a_sb, 1, -3)))) - (0.0294f * (__SBREF(__a_sb, 1, -2)))) - (0.0304f * (__SBREF(__a_sb, 1, -1)))) - (0.0314f * (__SBREF(__a_sb, 1, 0)))) - (0.0324f * (__SBREF(__a_sb, 1, 1)))) - (0.0334f * (__SBREF(__a_sb, 1, 2)))) - (0.0344f * (__SBREF(__a_sb, 1, 3)))) - (0.0354f * (__SBREF(__a_sb, 2, -3)))) - (0.0364f * (__SBREF(__a_sb, 2, -2)))) - (0.0374f * (__SBREF(__a_sb, 2, -1)))) - (0.0384f * (__SBREF(__a_sb, 2, 0)))) - (0.0394f * (__SBREF(__a_sb, 2, 1)))) - (0.0404f * (__SBREF(__a_sb, 2, 2)))) - (0.0414f * (__SBREF(__a_sb, 2, 3)))) - (0.0424f * (__SBREF(__a_sb, 3, -3)))) - (0.0434f * (__SBREF(__a_sb, 3, -2)))) - (0.0444f * (__SBREF(__a_sb, 3, -1)))) - (0.0454f * (__SBREF(__a_sb, 3, 0)))) - (0.0464f * (__SBREF(__a_sb, 3, 1)))) - (0.0474f * (__SBREF(__a_sb, 3, 2)))) - (0.0484f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-((-0.300f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -3, -3)))) + (0.0025f * (__SBREF(__a_sb, -3, -2)))) + (0.0035f * (__SBREF(__a_sb, -3, -1)))) + (0.0045f * (__SBREF(__a_sb, -3, 0)))) + (0.0055f * (__SBREF(__a_sb, -3, 1)))) + (0.0065f * (__SBREF(__a_sb, -3, 2)))) + (0.0075f * (__SBREF(__a_sb, -3, 3)))) + (0.0085f * (__SBREF(__a_sb, -2, -3)))) + (0.0095f * (__SBREF(__a_sb, -2, -2)))) + (0.0105f * (__SBREF(__a_sb, -2, -1)))) + (0.0115f * (__SBREF(__a_sb, -2, 0)))) + (0.0125f * (__SBREF(__a_sb, -2, 1)))) + (0.0135f * (__SBREF(__a_sb, -2, 2)))) + (0.0145f * (__SBREF(__a_sb, -2, 3)))) + (0.0155f * (__SBREF(__a_sb, -1, -3)))) + (0.0165f * (__SBREF(__a_sb, -1, -2)))) + (0.0175f * (__SBREF(__a_sb, -1, -1)))) + (0.0185f * (__SBREF(__a_sb, -1, 0)))) + (0.0195f * (__SBREF(__a_sb, -1, 1)))) + (0.0205f * (__SBREF(__a_sb, -1, 2)))) + (0.0215f * (__SBREF(__a_sb, -1, 3)))) + (0.0225f * (__SBREF(__a_sb, 0, -3)))) + (0.0235f * (__SBREF(__a_sb, 0, -2)))) + (0.0245f * (__SBREF(__a_sb, 0, -1)))) + (0.0255f * (__SBREF(__a_sb, 0, 1)))) + (0.0265f * (__SBREF(__a_sb, 0, 2)))) + (0.0275f * (__SBREF(__a_sb, 0, 3)))) + (0.0285f * (__SBREF(__a_sb, 1, -3)))) + (0.0295f * (__SBREF(__a_sb, 1, -2)))) + (0.0305f * (__SBREF(__a_sb, 1, -1)))) + (0.0315f * (__SBREF(__a_sb, 1, 0)))) + (0.0325f * (__SBREF(__a_sb, 1, 1)))) + (0.0335f * (__SBREF(__a_sb, 1, 2)))) + (0.0345f * (__SBREF(__a_sb, 1, 3)))) + (0.0355f * (__SBREF(__a_sb, 2, -3)))) + (0.0365f * (__SBREF(__a_sb, 2, -2)))) + (0.0375f * (__SBREF(__a_sb, 2, -1)))) + (0.0385f * (__SBREF(__a_sb, 2, 0)))) + (0.0395f * (__SBREF(__a_sb, 2, 1)))) + (0.0405f * (__SBREF(__a_sb, 2, 2)))) + (0.0415f * (__SBREF(__a_sb, 2, 3)))) + (0.0425f * (__SBREF(__a_sb, 3, -3)))) + (0.0435f * (__SBREF(__a_sb, 3, -2)))) + (0.0445f * (__SBREF(__a_sb, 3, -1)))) + (0.0455f * (__SBREF(__a_sb, 3, 0)))) + (0.0465f * (__SBREF(__a_sb, 3, 1)))) + (0.0475f * (__SBREF(__a_sb, 3, 2)))) + (0.1485f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_5(out, a) do { float etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((0.2048f * (__REGREF(__a, 0, 0)))) - (0.0016f * (__SBREF(__a_sb, -3, -3)))) - (0.0026f * (__SBREF(__a_sb, -3, -2)))) - (0.0036f * (__SBREF(__a_sb, -3, -1)))) - (0.0046f * (__SBREF(__a_sb, -3, 0)))) - (0.0056f * (__SBREF(__a_sb, -3, 1)))) - (0.0066f * (__SBREF(__a_sb, -3, 2)))) - (0.0076f * (__SBREF(__a_sb, -3, 3)))) - (0.0086f * (__SBREF(__a_sb, -2, -3)))) - (0.0096f * (__SBREF(__a_sb, -2, -2)))) - (0.0106f * (__SBREF(__a_sb, -2, -1)))) - (0.0116f * (__SBREF(__a_sb, -2, 0)))) - (0.0126f * (__SBREF(__a_sb, -2, 1)))) - (0.0136f * (__SBREF(__a_sb, -2, 2)))) - (0.0146f * (__SBREF(__a_sb, -2, 3)))) - (0.0156f * (__SBREF(__a_sb, -1, -3)))) - (0.0166f * (__SBREF(__a_sb, -1, -2)))) - (0.0176f * (__SBREF(__a_sb, -1, -1)))) - (0.0186f * (__SBREF(__a_sb, -1, 0)))) - (0.0196f * (__SBREF(__a_sb, -1, 1)))) - (0.0206f * (__SBREF(__a_sb, -1, 2)))) - (0.0216f * (__SBREF(__a_sb, -1, 3)))) - (0.0226f * (__SBREF(__a_sb, 0, -3)))) - (0.0236f * (__SBREF(__a_sb, 0, -2)))) - (0.0246f * (__SBREF(__a_sb, 0, -1)))) - (0.0256f * (__SBREF(__a_sb, 0, 1)))) - (0.0266f * (__SBREF(__a_sb, 0, 2)))) - (0.0276f * (__SBREF(__a_sb, 0, 3)))) - (0.0286f * (__SBREF(__a_sb, 1, -3)))) - (0.0296f * (__SBREF(__a_sb, 1, -2)))) - (0.0306f * (__SBREF(__a_sb, 1, -1)))) - (0.0316f * (__SBREF(__a_sb, 1, 0)))) - (0.0326f * (__SBREF(__a_sb, 1, 1)))) - (0.0336f * (__SBREF(__a_sb, 1, 2)))) - (0.0346f * (__SBREF(__a_sb, 1, 3)))) - (0.0356f * (__SBREF(__a_sb, 2, -3)))) - (0.0366f * (__SBREF(__a_sb, 2, -2)))) - (0.0376f * (__SBREF(__a_sb, 2, -1)))) - (0.0386f * (__SBREF(__a_sb, 2, 0)))) - (0.0396f * (__SBREF(__a_sb, 2, 1)))) - (0.0406f * (__SBREF(__a_sb, 2, 2)))) - (0.0416f * (__SBREF(__a_sb, 2, 3)))) - (0.0426f * (__SBREF(__a_sb, 3, -3)))) - (0.0436f * (__SBREF(__a_sb, 3, -2)))) - (0.0446f * (__SBREF(__a_sb, 3, -1)))) - (0.0456f * (__SBREF(__a_sb, 3, 0)))) - (0.0466f * (__SBREF(__a_sb, 3, 1)))) - (0.0476f * (__SBREF(__a_sb, 3, 2)))) - (0.0486f * (__SBREF(__a_sb, 3, 3)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_6(out, a) do { float etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg); } else out3 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(3, __reg_1_3); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(3, __reg_1_3); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 10;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 3, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 3, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 3, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 3, __reg_1_2); __h++; __LOAD(__reg_0, 
__h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 3, __reg_1_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); } else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); 
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); } else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); } else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 2, __reg_1_5); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 1, __reg_1_6); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); } } else { for (__h = 7; __h <= __side1LenOl - 7;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, 
__reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 3, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 3, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 3, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 3, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 3, __reg_1_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 3, __reg_1_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 3, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 3, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 3, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 3, __reg_1_3); __h++; } }
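The generated stencil kernel that ends above follows one pattern throughout: each thread streams along the c1 axis, keeps the last seven loaded planes in the rotating registers __reg_1_0 .. __reg_1_6 (the __CALC1 argument order shifts by one each step instead of moving data), stages the newest plane in the double-buffered shared array toggled by __DB_SWITCH so in-plane neighbours are available, and only writes plane __h - 3 once plane __h has been loaded (halo of 3 on either side). Below is a minimal hand-written sketch of the same register-pipeline idea, reduced to a single streamed axis so the shared-memory staging can be left out; HALO, stream_column_stencil, w, ny, nx and the main() driver are illustrative names, not taken from the generated code.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

#define HALO 3   // matches __halo1 == 3 in the generated kernel above

// out[y][x] = sum_k w[k] * in[y - HALO + k][x] for interior rows; each thread owns
// one column x and streams along y, keeping a 7-sample register window and writing
// row y only after row y + HALO has been loaded (the lagged __STORE(__h - 3, ...)).
__global__ void stream_column_stencil(const float *in, float *out,
                                      const float *w, int ny, int nx)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= nx) return;

    const int W = 2 * HALO + 1;
    float win[W];

    for (int k = 0; k < W; ++k)              // prologue: preload rows 0 .. 2*HALO
        win[k] = in[k * nx + x];

    for (int y = HALO; y + HALO < ny; ++y) {
        float acc = 0.0f;
        for (int k = 0; k < W; ++k)
            acc += w[k] * win[k];
        out[y * nx + x] = acc;               // store lags the newest load by HALO rows

        if (y + HALO + 1 < ny) {             // load one new row, retire the oldest
            for (int k = 0; k + 1 < W; ++k)  // (the generated code rotates register
                win[k] = win[k + 1];         //  names instead of shifting values)
            win[W - 1] = in[(y + HALO + 1) * nx + x];
        }
    }
}

int main()
{
    const int ny = 64, nx = 256, N = ny * nx, W = 2 * HALO + 1;
    std::vector<float> h_in(N, 1.0f), h_w(W, 1.0f / W), h_out(N, 0.0f);
    float *d_in, *d_out, *d_w;
    cudaMalloc(&d_in, N * sizeof(float));
    cudaMalloc(&d_out, N * sizeof(float));
    cudaMalloc(&d_w, W * sizeof(float));
    cudaMemcpy(d_in, h_in.data(), N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_w, h_w.data(), W * sizeof(float), cudaMemcpyHostToDevice);

    stream_column_stencil<<<(nx + 255) / 256, 256>>>(d_in, d_out, d_w, ny, nx);
    cudaMemcpy(h_out.data(), d_out, N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("out[%d][0] = %f (expect 1.0 for an all-ones input)\n", HALO, h_out[HALO * nx]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_w);
    return 0;
}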
056f89f6c8cfeddc33213a23ee9f5a1da1c69ce9.hip
// !!! This is a file automatically generated by hipify!!! #include "SamplingKernel.cuh" #define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void SampleKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream) { ////unsigned int blockCounter = 0; //unsigned int* deviceCounter; //hipMalloc(&deviceCounter, sizeof(unsigned int)); hipMemsetAsync(argDoc.deviceCounterSampleKernelD[argStreamId], 0, sizeof(unsigned int), stream); /*hipMemcpyAsync(argDoc.deviceCounterSampleKernelD[argStreamId], &argDoc.counterSampleKernelD, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ // srand(time(NULL)); // hiprandState_t* randState; // hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim); // H_ERR(hipDeviceSynchronize()); // gpuErr(hipPeekAtLastError()); //initRandState << <GridDim, BlockDim, 0, stream >> >(randState); /*H_ERR(hipDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice); LDAKernelTrainD << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelD[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceTimeRecord[argStreamId], argDoc.tokenSegment, argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDT.deviceDTIndexValue[argStreamId],argDoc.deviceMaxSecTopic[argStreamId]); /*H_ERR(hipDeviceSynchronize());*/ } //(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, hiprandState_t *randState, double *WTHeadDense, int numOfWordD); void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream) { int numOfWordD = argWT.wordLength - argWT.numOfWordS; /*unsigned int* deviceCounter; hipMalloc(&deviceCounter, sizeof(unsigned int)); hipMemset(deviceCounter, 0, sizeof(unsigned int));*/ hipMemsetAsync(argDoc.deviceCounterSampleKernelS[argStreamId], 0, sizeof(unsigned int), stream); /*hipMemcpyAsync(argDoc.deviceCounterSampleKernelS[argStreamId], &argDoc.counterSampleKernelS, sizeof(unsigned int), 
hipMemcpyHostToDevice, stream);*/ //initRandState << <GridDim, BlockDim, 0, stream>> >(randState); /*H_ERR(hipDeviceSynchronize());*/ LDAKernelTrain << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelS[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], numOfWordD, argWT.numOfWordS, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDT.deviceDTIndexValue[argStreamId]); //H_ERR(hipDeviceSynchronize()); } void MaxTopicKernel(WTAll &argWT, Document &argDoc, WTD &argWTDen, int argStreamId, hipStream_t& stream) { int numOfWordD = argWT.wordLength; /*unsigned int* deviceCounter; hipMalloc(&deviceCounter, sizeof(unsigned int));*/ hipMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream); /*hipMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ /*hipMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ MaxTopicDense_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argWTDen.deviceWTDense, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, numOfWordD, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId]); //H_ERR(hipDeviceSynchronize()); /*hipMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ //hipMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream); //MaxTopicSparse_Update_Kernel << <GridDim, BlockDim, 0, stream>> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, numOfWordD, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, argWT.numOfWordS, argWT.deviceWTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId]); /*H_ERR(hipDeviceSynchronize());*/ } //void UpdateProbKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, 
hipStream_t& stream) //{ // // //unsigned int blockCounter = 0; // //unsigned int* deviceCounter; // //hipMalloc(&deviceCounter, sizeof(unsigned int)); // hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); // /*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ // // initRandState << <GridDim, BlockDim, 0, stream >> >(randState); // /*H_ERR(hipDeviceSynchronize());*/ // // // for (int i = 0; i < iterWT; i++) { // // //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice); // // UpdateProbKernelTrainD << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment); // // /*H_ERR(hipDeviceSynchronize()); //*/ //} void UpdateProbKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState, int argStreamId, hipStream_t& stream) { //hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); /*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ /*H_ERR(hipDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice); //UpdateProbKernelTrainD0 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], 
argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment, argDoc.deviceTotalTokenCount[argStreamId]); hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); /*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ //initRandState << <GridDim, BlockDim, 0, stream >> >(randState); /*H_ERR(hipDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice); UpdateProbKernelTrainD1 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment); /*H_ERR(hipDeviceSynchronize()); * */ //unsigned int blockCounter = 0; //unsigned int* deviceCounter; //hipMalloc(&deviceCounter, sizeof(unsigned int)); //hipMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); /*hipMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), hipMemcpyHostToDevice, stream);*/ //initRandState << <GridDim, BlockDim, 0, stream >> >(randState); /*H_ERR(hipDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice); //UpdateProbKernelTrainD2 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, 
argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment); /*H_ERR(hipDeviceSynchronize());*/ } void PerplexityKernel(Document &argDoc, int argStreamId, hipStream_t& stream) { LDATrainPerplexityReduce << <1, BlockDim, 0, stream >> > (argDoc.devicePerplexityMid[argStreamId], argDoc.totalNumOfTokens, argDoc.devicePerplexityAve[argStreamId]); hipMemcpyAsync(argDoc.perplexityAve, argDoc.devicePerplexityAve[argStreamId],sizeof(float), hipMemcpyDeviceToHost, stream); /*H_ERR(hipDeviceSynchronize());*/ } // //void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState) { // // int blockCounter = 0; // int iterWT = (argWT.numOfWordS - 1) / GridDim + 1; // float Perplexity = 0.0; // int numOfWordD = argWT.wordLength - argWT.numOfWordS; // // srand(time(NULL)); // // // hiprandState_t* randState; // // hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim); // // H_ERR(hipDeviceSynchronize()); // // gpuErr(hipPeekAtLastError()); // // initRandState << <GridDim, BlockDim >> >(randState); // H_ERR(hipDeviceSynchronize()); // // for (int i = 0; i < iterWT; i++) { // // hipMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), hipMemcpyHostToDevice); // // LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS); // H_ERR(hipDeviceSynchronize()); // blockCounter++; // // } // LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]); // // H_ERR(hipDeviceSynchronize()); // // //} // // //
056f89f6c8cfeddc33213a23ee9f5a1da1c69ce9.cu
#include "SamplingKernel.cuh" #define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void SampleKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream) { ////unsigned int blockCounter = 0; //unsigned int* deviceCounter; //cudaMalloc(&deviceCounter, sizeof(unsigned int)); cudaMemsetAsync(argDoc.deviceCounterSampleKernelD[argStreamId], 0, sizeof(unsigned int), stream); /*cudaMemcpyAsync(argDoc.deviceCounterSampleKernelD[argStreamId], &argDoc.counterSampleKernelD, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ // srand(time(NULL)); // curandState* randState; // cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim); // H_ERR(cudaDeviceSynchronize()); // gpuErr(cudaPeekAtLastError()); //initRandState << <GridDim, BlockDim, 0, stream >> >(randState); /*H_ERR(cudaDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice); LDAKernelTrainD << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelD[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceTimeRecord[argStreamId], argDoc.tokenSegment, argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDT.deviceDTIndexValue[argStreamId],argDoc.deviceMaxSecTopic[argStreamId]); /*H_ERR(cudaDeviceSynchronize());*/ } //(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, curandState *randState, double *WTHeadDense, int numOfWordD); void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream) { int numOfWordD = argWT.wordLength - argWT.numOfWordS; /*unsigned int* deviceCounter; cudaMalloc(&deviceCounter, sizeof(unsigned int)); cudaMemset(deviceCounter, 0, sizeof(unsigned int));*/ cudaMemsetAsync(argDoc.deviceCounterSampleKernelS[argStreamId], 0, sizeof(unsigned int), stream); /*cudaMemcpyAsync(argDoc.deviceCounterSampleKernelS[argStreamId], &argDoc.counterSampleKernelS, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ //initRandState << 
<GridDim, BlockDim, 0, stream>> >(randState); /*H_ERR(cudaDeviceSynchronize());*/ LDAKernelTrain << <GridDim, BlockDim, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterSampleKernelS[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], numOfWordD, argWT.numOfWordS, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDT.deviceDTIndexValue[argStreamId]); //H_ERR(cudaDeviceSynchronize()); } void MaxTopicKernel(WTAll &argWT, Document &argDoc, WTD &argWTDen, int argStreamId, cudaStream_t& stream) { int numOfWordD = argWT.wordLength; /*unsigned int* deviceCounter; cudaMalloc(&deviceCounter, sizeof(unsigned int));*/ cudaMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream); /*cudaMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ /*cudaMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ MaxTopicDense_Update_Kernel << <GridDim, BlockDim, 0, stream >> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argWTDen.deviceWTDense, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, numOfWordD, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId]); //H_ERR(cudaDeviceSynchronize()); /*cudaMemcpyAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], &argDoc.counterMaxTopicKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ //cudaMemsetAsync(argDoc.deviceCounterMaxTopicKernel[argStreamId], 0, sizeof(unsigned int),stream); //MaxTopicSparse_Update_Kernel << <GridDim, BlockDim, 0, stream>> >(argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTOffset, numOfWordD, argDoc.deviceCounterMaxTopicKernel[argStreamId], argWT.deviceWTRowSum, argWT.wordLength, argWT.numOfWordS, argWT.deviceWTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, beta, argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId]); /*H_ERR(cudaDeviceSynchronize());*/ } //void UpdateProbKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream) //{ // // //unsigned 
int blockCounter = 0; // //unsigned int* deviceCounter; // //cudaMalloc(&deviceCounter, sizeof(unsigned int)); // cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); // /*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ // // initRandState << <GridDim, BlockDim, 0, stream >> >(randState); // /*H_ERR(cudaDeviceSynchronize());*/ // // // for (int i = 0; i < iterWT; i++) { // // //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice); // // UpdateProbKernelTrainD << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment); // // /*H_ERR(cudaDeviceSynchronize()); //*/ //} void UpdateProbKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState, int argStreamId, cudaStream_t& stream) { //cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); /*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ /*H_ERR(cudaDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice); //UpdateProbKernelTrainD0 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], 
argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment, argDoc.deviceTotalTokenCount[argStreamId]); cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); /*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ //initRandState << <GridDim, BlockDim, 0, stream >> >(randState); /*H_ERR(cudaDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice); UpdateProbKernelTrainD1 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment); /*H_ERR(cudaDeviceSynchronize()); * */ //unsigned int blockCounter = 0; //unsigned int* deviceCounter; //cudaMalloc(&deviceCounter, sizeof(unsigned int)); //cudaMemsetAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], 0, sizeof(unsigned int),stream); /*cudaMemcpyAsync(argDoc.deviceCounterUpdateProbKernel[argStreamId], &argDoc.counterUpdateProbKernel, sizeof(unsigned int), cudaMemcpyHostToDevice, stream);*/ //initRandState << <GridDim, BlockDim, 0, stream >> >(randState); /*H_ERR(cudaDeviceSynchronize());*/ // for (int i = 0; i < iterWT; i++) { //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice); //UpdateProbKernelTrainD2 << <256, 256, 0, stream >> > (alpha, beta, argDoc.deviceMapWord2Doc[argStreamId], argDoc.deviceTLTopic[argStreamId], argDT.deviceNZDTCount[argStreamId], argDoc.deviceTLDocCount[argStreamId], argDoc.deviceTLDocOffset[argStreamId], argDT.deviceDTCount[argStreamId], argDT.deviceDTOffset[argStreamId], argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount[argStreamId], argDoc.deviceTLWordOffset[argStreamId], argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.deviceCounterUpdateProbKernel[argStreamId], argDoc.deviceMapDoc2Word[argStreamId], argDoc.docLengthVec[argDT.chunkId], 
argWT.wordLength, argDoc.devicePerplexityMid[argStreamId], randState, argDoc.deviceWTHeadDense[argStreamId], argWTDen.numOfWordD, argDoc.deviceWordMaxTopic[argStreamId], argDoc.deviceWordSecondMaxTopic[argStreamId], argDoc.deviceMaxProb[argStreamId], argDoc.deviceThresProb[argStreamId], argDoc.deviceWordThirdMaxTopic[argStreamId], argDoc.deviceRandomfloat[argStreamId], argDoc.deviceEffectiveTokenIndex[argStreamId], argDoc.deviceNewTokenCount[argStreamId], argDoc.deviceMaxSecTopic[argStreamId], argDoc.deviceQArray[argStreamId], argDoc.deviceWordMaxProb[argStreamId], argDoc.deviceWordSecondMaxProb[argStreamId], argDoc.deviceWordThirdMaxProb[argStreamId], argDoc.tokenSegment); /*H_ERR(cudaDeviceSynchronize());*/ } void PerplexityKernel(Document &argDoc, int argStreamId, cudaStream_t& stream) { LDATrainPerplexityReduce << <1, BlockDim, 0, stream >> > (argDoc.devicePerplexityMid[argStreamId], argDoc.totalNumOfTokens, argDoc.devicePerplexityAve[argStreamId]); cudaMemcpyAsync(argDoc.perplexityAve, argDoc.devicePerplexityAve[argStreamId],sizeof(float), cudaMemcpyDeviceToHost, stream); /*H_ERR(cudaDeviceSynchronize());*/ } // //void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState) { // // int blockCounter = 0; // int iterWT = (argWT.numOfWordS - 1) / GridDim + 1; // float Perplexity = 0.0; // int numOfWordD = argWT.wordLength - argWT.numOfWordS; // // srand(time(NULL)); // // // curandState* randState; // // cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim); // // H_ERR(cudaDeviceSynchronize()); // // gpuErr(cudaPeekAtLastError()); // // initRandState << <GridDim, BlockDim >> >(randState); // H_ERR(cudaDeviceSynchronize()); // // for (int i = 0; i < iterWT; i++) { // // cudaMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), cudaMemcpyHostToDevice); // // LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS); // H_ERR(cudaDeviceSynchronize()); // blockCounter++; // // } // LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]); // // H_ERR(cudaDeviceSynchronize()); // // //} // // //
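Both halves of the SamplingKernel pair above define a gpuErr/gpuAssert checker but then issue their async memsets, kernel launches and copies unchecked. The following is a minimal, self-contained sketch, written against the CUDA half, of wrapping the same launch shape (memset a per-stream device counter, then launch a kernel on that stream) in the checker the file already defines; zeroCounter and everything inside main() are illustrative stand-ins, not code from the dataset.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

__global__ void zeroCounter(unsigned int *c)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) *c = 0u;
}

int main()
{
    unsigned int *d_counter = nullptr;
    cudaStream_t stream;
    gpuErr(cudaStreamCreate(&stream));
    gpuErr(cudaMalloc(&d_counter, sizeof(unsigned int)));

    // Same shape as SampleKernelD: async memset of a per-stream counter, then a
    // kernel launch on the same stream, with every call routed through the checker.
    gpuErr(cudaMemsetAsync(d_counter, 0, sizeof(unsigned int), stream));
    zeroCounter<<<1, 32, 0, stream>>>(d_counter);
    gpuErr(cudaGetLastError());            // launch-configuration errors surface here
    gpuErr(cudaStreamSynchronize(stream)); // execution errors surface on synchronization

    gpuErr(cudaFree(d_counter));
    gpuErr(cudaStreamDestroy(stream));
    return 0;
}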
dc2b60c21e2b2f414d059e9b55a4a2a8ad11025c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ========================================================================== */ /* ========================= CHOLMOD CUDA/C kernels ========================= */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2006, Timothy A. Davis * http://www.suitesparse.com * -------------------------------------------------------------------------- */ #include <stdio.h> #include "SuiteSparse_config.h" /* 64-bit version only */ #define Int SuiteSparse_long extern "C" { __global__ void kernelCreateMap ( Int *d_Map, Int *d_Ls, Int psi, Int nsrow ) /* Ls[supernode row] = Matrix Row */ { int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < nsrow ) { d_Map[d_Ls[psi+tid]] = ((Int) (tid)); } } __global__ void kernelCreateRelativeMap ( Int *d_Map, Int *d_Ls, Int *d_RelativeMap, Int pdi1, Int ndrow ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < ndrow ) { d_RelativeMap[tid] = d_Map[d_Ls[pdi1+tid]]; } } __global__ void kernelAddUpdate ( double *d_A, double *devPtrC, Int *d_RelativeMap, Int ndrow1, Int ndrow2, Int nsrow ) { int idrow = blockIdx.x * blockDim.x + threadIdx.x; int idcol = blockIdx.y * blockDim.y + threadIdx.y; if ( idrow < ndrow2 && idcol < ndrow1 ) { Int idx = d_RelativeMap[idrow] + d_RelativeMap[idcol] * nsrow; d_A[idx] += devPtrC[idrow+ndrow2*idcol]; } } __global__ void kernelAddComplexUpdate ( double *d_A, double *devPtrC, Int *d_RelativeMap, Int ndrow1, Int ndrow2, Int nsrow ) { int idrow = blockIdx.x * blockDim.x + threadIdx.x; int idcol = blockIdx.y * blockDim.y + threadIdx.y; if ( idrow < ndrow2 && idcol < ndrow1 ) { Int idx = d_RelativeMap[idrow] + d_RelativeMap[idcol] * nsrow; d_A[idx*2] += devPtrC[(idrow+ndrow2*idcol)*2]; d_A[idx*2+1] += devPtrC[(idrow+ndrow2*idcol)*2+1]; } } __global__ void kernelSumA ( double *a1, double *a2, const double alpha, int nsrow, int nscol ) { int isrow = blockIdx.x * blockDim.x + threadIdx.x; int iscol = blockIdx.y * blockDim.y + threadIdx.y; if ( isrow < nsrow && iscol < nscol ) { Int idx = iscol*nsrow + isrow; a1[idx] += alpha * a2[idx]; } } __global__ void kernelSumComplexA ( double *a1, double *a2, const double alpha, int nsrow, int nscol ) { int isrow = blockIdx.x * blockDim.x + threadIdx.x; int iscol = blockIdx.y * blockDim.y + threadIdx.y; if ( isrow < nsrow && iscol < nscol ) { Int idx = iscol*nsrow + isrow; a1[idx*2] += alpha * a2[idx*2]; a1[idx*2+1] += alpha * a2[idx*2+1]; } } /* ======================================================================== */ /* using Ls and Lpi data already on the device, construct Map */ /* ======================================================================== */ int createMapOnDevice ( Int *d_Map, Int *d_Ls, Int psi, Int nsrow ) { unsigned int kgrid = (nsrow+31)/32; unsigned int kblock = 32; hipLaunchKernelGGL(( kernelCreateMap) , dim3(kgrid), dim3(kblock), 0, 0, d_Map, d_Ls, psi, nsrow ); return 0; } int createRelativeMapOnDevice ( Int *d_Map, Int *d_Ls, Int *d_RelativeMap,Int pdi1, Int ndrow, hipStream_t* astream ) { unsigned int kgrid = (ndrow+255)/256; unsigned int kblock = 256; hipLaunchKernelGGL(( kernelCreateRelativeMap) , dim3(kgrid), dim3(kblock), 0, *astream, d_Map, d_Ls, d_RelativeMap, pdi1, ndrow); return 0; } /* ======================================================================== */ int addUpdateOnDevice ( double *d_A, double *devPtrC, 
Int *d_RelativeMap, Int ndrow1, Int ndrow2, Int nsrow, hipStream_t* astream ) /* ======================================================================== */ /* Assemble the Schur complment from a descendant supernode into the current supernode */ /* ======================================================================== */ { dim3 grids; dim3 blocks; blocks.x = 16; blocks.y = 16; blocks.z = 1; grids.x = (ndrow2+15)/16; grids.y = (ndrow1+15)/16; hipLaunchKernelGGL(( kernelAddUpdate) , dim3(grids), dim3(blocks), 0, *astream, d_A, devPtrC, d_RelativeMap, ndrow1, ndrow2, nsrow ); return 0; } /* ======================================================================== */ int addComplexUpdateOnDevice ( double *d_A, double *devPtrC, Int *d_RelativeMap, Int ndrow1, Int ndrow2, Int nsrow, hipStream_t* astream ) /* ======================================================================== */ /* Assemble the Schur complment from a descendant supernode into the current supernode */ /* ======================================================================== */ { dim3 grids; dim3 blocks; blocks.x = 16; blocks.y = 16; blocks.z = 1; grids.x = (ndrow2+15)/16; grids.y = (ndrow1+15)/16; hipLaunchKernelGGL(( kernelAddComplexUpdate) , dim3(grids), dim3(blocks), 0, *astream, d_A, devPtrC, d_RelativeMap, ndrow1, ndrow2, nsrow ); return 0; } int sumAOnDevice ( double *a1, double *a2, const double alpha, int nsrow, int nscol ) { dim3 grids; dim3 blocks; blocks.x = 16; blocks.y = 16; blocks.z = 1; grids.x = (nsrow+15)/16; grids.y = (nscol+15)/16; hipLaunchKernelGGL(( kernelSumA) , dim3(grids), dim3(blocks), 0, 0, a1, a2, alpha, nsrow, nscol ); return 0; } int sumComplexAOnDevice ( double *a1, double *a2, const double alpha, int nsrow, int nscol ) { dim3 grids; dim3 blocks; blocks.x = 16; blocks.y = 16; blocks.z = 1; grids.x = (nsrow+15)/16; grids.y = (nscol+15)/16; hipLaunchKernelGGL(( kernelSumComplexA) , dim3(grids), dim3(blocks), 0, 0, a1, a2, alpha, nsrow, nscol ); return 0; } }
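The .hip half of this CHOLMOD entry (above) expresses every launch as hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedBytes, stream, args...), while the .cu half that follows keeps the kernel<<<grid, block, sharedBytes, stream>>>(args...) form; the launch configuration simply becomes the leading macro arguments. A small self-contained CUDA sketch of that correspondence (scaleRows and the sizes are illustrative, not taken from the dataset):

#include <cuda_runtime.h>

__global__ void scaleRows(double *a, double alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] *= alpha;
}

int main()
{
    const int n = 1024;
    double *d_a = nullptr;
    cudaMalloc(&d_a, n * sizeof(double));
    cudaMemset(d_a, 0, n * sizeof(double));

    dim3 blocks(256), grids((n + 255) / 256);

    // CUDA form, as in the .cu half of this entry:
    scaleRows<<<grids, blocks, 0, 0>>>(d_a, 2.0, n);
    // The hipified .hip half above writes the same launch as
    //   hipLaunchKernelGGL(scaleRows, dim3(grids), dim3(blocks), 0, 0, d_a, 2.0, n);
    // i.e. kernel, grid, block, shared-memory bytes and stream become the first five
    // macro arguments, followed by the original kernel arguments unchanged.

    cudaDeviceSynchronize();
    cudaFree(d_a);
    return 0;
}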
dc2b60c21e2b2f414d059e9b55a4a2a8ad11025c.cu
/* ========================================================================== */
/* ========================= CHOLMOD CUDA/C kernels ========================= */
/* ========================================================================== */

/* -----------------------------------------------------------------------------
 * CHOLMOD/GPU Module.  Copyright (C) 2005-2006, Timothy A. Davis
 * http://www.suitesparse.com
 * -------------------------------------------------------------------------- */

#include <stdio.h>
#include "SuiteSparse_config.h"

/* 64-bit version only */
#define Int SuiteSparse_long

extern "C" {

__global__ void kernelCreateMap ( Int *d_Map, Int *d_Ls, Int psi, Int nsrow )
/* Ls[supernode row] = Matrix Row */
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < nsrow ) {
        d_Map[d_Ls[psi+tid]] = ((Int) (tid));
    }
}

__global__ void kernelCreateRelativeMap ( Int *d_Map, Int *d_Ls, Int *d_RelativeMap,
                                          Int pdi1, Int ndrow )
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < ndrow ) {
        d_RelativeMap[tid] = d_Map[d_Ls[pdi1+tid]];
    }
}

__global__ void kernelAddUpdate ( double *d_A, double *devPtrC, Int *d_RelativeMap,
                                  Int ndrow1, Int ndrow2, Int nsrow )
{
    int idrow = blockIdx.x * blockDim.x + threadIdx.x;
    int idcol = blockIdx.y * blockDim.y + threadIdx.y;
    if ( idrow < ndrow2 && idcol < ndrow1 ) {
        Int idx = d_RelativeMap[idrow] + d_RelativeMap[idcol] * nsrow;
        d_A[idx] += devPtrC[idrow+ndrow2*idcol];
    }
}

__global__ void kernelAddComplexUpdate ( double *d_A, double *devPtrC, Int *d_RelativeMap,
                                         Int ndrow1, Int ndrow2, Int nsrow )
{
    int idrow = blockIdx.x * blockDim.x + threadIdx.x;
    int idcol = blockIdx.y * blockDim.y + threadIdx.y;
    if ( idrow < ndrow2 && idcol < ndrow1 ) {
        Int idx = d_RelativeMap[idrow] + d_RelativeMap[idcol] * nsrow;
        d_A[idx*2] += devPtrC[(idrow+ndrow2*idcol)*2];
        d_A[idx*2+1] += devPtrC[(idrow+ndrow2*idcol)*2+1];
    }
}

__global__ void kernelSumA ( double *a1, double *a2, const double alpha,
                             int nsrow, int nscol )
{
    int isrow = blockIdx.x * blockDim.x + threadIdx.x;
    int iscol = blockIdx.y * blockDim.y + threadIdx.y;
    if ( isrow < nsrow && iscol < nscol ) {
        Int idx = iscol*nsrow + isrow;
        a1[idx] += alpha * a2[idx];
    }
}

__global__ void kernelSumComplexA ( double *a1, double *a2, const double alpha,
                                    int nsrow, int nscol )
{
    int isrow = blockIdx.x * blockDim.x + threadIdx.x;
    int iscol = blockIdx.y * blockDim.y + threadIdx.y;
    if ( isrow < nsrow && iscol < nscol ) {
        Int idx = iscol*nsrow + isrow;
        a1[idx*2] += alpha * a2[idx*2];
        a1[idx*2+1] += alpha * a2[idx*2+1];
    }
}

/* ======================================================================== */
/* using Ls and Lpi data already on the device, construct Map */
/* ======================================================================== */

int createMapOnDevice ( Int *d_Map, Int *d_Ls, Int psi, Int nsrow )
{
    unsigned int kgrid = (nsrow+31)/32;
    unsigned int kblock = 32;
    kernelCreateMap <<<kgrid, kblock>>> ( d_Map, d_Ls, psi, nsrow );
    return 0;
}

int createRelativeMapOnDevice ( Int *d_Map, Int *d_Ls, Int *d_RelativeMap,
                                Int pdi1, Int ndrow, cudaStream_t* astream )
{
    unsigned int kgrid = (ndrow+255)/256;
    unsigned int kblock = 256;
    kernelCreateRelativeMap <<<kgrid, kblock, 0, *astream>>> ( d_Map, d_Ls,
        d_RelativeMap, pdi1, ndrow);
    return 0;
}

/* ======================================================================== */
int addUpdateOnDevice ( double *d_A, double *devPtrC, Int *d_RelativeMap,
                        Int ndrow1, Int ndrow2, Int nsrow, cudaStream_t* astream )
/* ======================================================================== */
/* Assemble the Schur complement from a descendant supernode into the current supernode */
/* ======================================================================== */
{
    dim3 grids;
    dim3 blocks;

    blocks.x = 16;
    blocks.y = 16;
    blocks.z = 1;

    grids.x = (ndrow2+15)/16;
    grids.y = (ndrow1+15)/16;

    kernelAddUpdate <<<grids, blocks, 0, *astream>>> ( d_A, devPtrC,
        d_RelativeMap, ndrow1, ndrow2, nsrow );
    return 0;
}

/* ======================================================================== */
int addComplexUpdateOnDevice ( double *d_A, double *devPtrC, Int *d_RelativeMap,
                               Int ndrow1, Int ndrow2, Int nsrow, cudaStream_t* astream )
/* ======================================================================== */
/* Assemble the Schur complement from a descendant supernode into the current supernode */
/* ======================================================================== */
{
    dim3 grids;
    dim3 blocks;

    blocks.x = 16;
    blocks.y = 16;
    blocks.z = 1;

    grids.x = (ndrow2+15)/16;
    grids.y = (ndrow1+15)/16;

    kernelAddComplexUpdate <<<grids, blocks, 0, *astream>>> ( d_A, devPtrC,
        d_RelativeMap, ndrow1, ndrow2, nsrow );
    return 0;
}

int sumAOnDevice ( double *a1, double *a2, const double alpha, int nsrow, int nscol )
{
    dim3 grids;
    dim3 blocks;

    blocks.x = 16;
    blocks.y = 16;
    blocks.z = 1;

    grids.x = (nsrow+15)/16;
    grids.y = (nscol+15)/16;

    kernelSumA <<<grids, blocks, 0, 0>>> ( a1, a2, alpha, nsrow, nscol );
    return 0;
}

int sumComplexAOnDevice ( double *a1, double *a2, const double alpha, int nsrow, int nscol )
{
    dim3 grids;
    dim3 blocks;

    blocks.x = 16;
    blocks.y = 16;
    blocks.z = 1;

    grids.x = (nsrow+15)/16;
    grids.y = (nscol+15)/16;

    kernelSumComplexA <<<grids, blocks, 0, 0>>> ( a1, a2, alpha, nsrow, nscol );
    return 0;
}

}
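/* --------------------------------------------------------------------------
 * Editorial sketch (not part of CHOLMOD).  Every host wrapper above sizes its
 * launch grid with the integer ceil-division idiom: (n + 15)/16 for the 16x16
 * 2-D kernels, (n + 31)/32 and (n + 255)/256 for the 1-D ones.  The same
 * computation as a named helper, with illustrative names only:
 * -------------------------------------------------------------------------- */
#include <cuda_runtime.h>

static inline unsigned int ceil_div ( unsigned int n, unsigned int d )
{
    return (n + d - 1) / d;   /* smallest g such that g*d >= n */
}

static dim3 make_grid_2d ( unsigned int nrow, unsigned int ncol )
{
    /* e.g. addUpdateOnDevice uses grids.x = (ndrow2+15)/16, grids.y = (ndrow1+15)/16 */
    return dim3 ( ceil_div (nrow, 16), ceil_div (ncol, 16), 1 );
}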
9b90c8b37b670a854103cee42c22048e83ead291.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <sys/time.h> #include <fstream> #include <functional> #include "HugeCTR/include/data_generator.hpp" #include "HugeCTR/include/data_readers/data_reader.hpp" #include "HugeCTR/include/embeddings/localized_slot_sparse_embedding_hash.hpp" #include "gtest/gtest.h" #include "roctracer/roctx.h" #include "utest/embedding/embedding_test_utils.hpp" #include "utest/embedding/sparse_embedding_hash_cpu.hpp" #include "utest/test_utils.h" using namespace HugeCTR; using namespace embedding_test; namespace { //--------------------------------------------------------------------------------------- // global params for all testing const int train_batch_num = 10; // can not more than 32 const int test_batch_num = 1; const int train_batchsize = 1024; const int test_batchsize = 2560; const int slot_num = 26; const int max_nnz_per_slot = 1; const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample const long long vocabulary_size = slot_num * 100; const int embedding_vec_size = 128; const int combiner = 0; // 0-sum, 1-mean const long long label_dim = 1; const long long dense_dim = 0; typedef long long T; const float scaler = 1.0f; // used in mixed precision training // In order to not allocate the total size of hash table on each GPU, the users need to set the // size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count, // eg: 1.25x of that. 
const int num_chunk_threads = 1; // must be 1 for CPU and GPU results comparation const int num_files = 1; const Check_t CHK = Check_t::Sum; // Check_t::Sum const char *train_file_list_name = "train_file_list.txt"; const char *test_file_list_name = "test_file_list.txt"; const char *prefix = "./data_reader_test_data/temp_dataset_"; #ifndef NCCl_A2A const std::string plan_file(PROJECT_HOME_ + "utest/all2all_plan_dgx_{0,1,2,3,4,5,6,7}.json"); #else const std::string plan_file = ""; #endif const char *hash_table_file_name = "localized_hash_table.bin"; std::vector<size_t> slot_sizes; // null means use vocabulary_size/gpu_count/load_factor as // max_vocabulary_size_per_gpu // CAUSION: must match vocabulary_size // std::vector<size_t> slot_sizes = {39884406,39043,17289,7420,20263,3,7120,1543,63,38532951, // 2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36}; // // for cretio dataset // std::vector<size_t> slot_sizes = // {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}; // // just for verify //----------------------------------------------------------------------------------------- template <typename TypeEmbeddingComp> void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { OptHyperParams<TypeEmbeddingComp> hyper_params; hyper_params.adam.beta1 = 0.9f; hyper_params.adam.beta2 = 0.999f; float tolerance; if (std::is_same<TypeEmbeddingComp, __half>::value) { hyper_params.adam.epsilon = 1e-4f; tolerance = 5e-3f; } else { hyper_params.adam.epsilon = 1e-7f; tolerance = 1e-4f; } hyper_params.momentum.factor = 0.9f; hyper_params.nesterov.mu = 0.9f; const float lr = optimizer == Optimizer_t::Adam ? 0.001f : 0.01f; const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, update_type, scaler}; test::mpi_init(); int numprocs = 1; #ifdef ENABLE_MPI MPI_Comm_size(MPI_COMM_WORLD, &numprocs); #endif // if there are multi-node, we assume each node has the same gpu device_list std::vector<std::vector<int>> vvgpu; for (int i = 0; i < numprocs; i++) { vvgpu.push_back(device_list); } const auto &resource_manager = ResourceManager::create(vvgpu, 0); if (resource_manager->is_master_process()) { std::cout << "rank " << resource_manager->get_process_id() << " is generating data" << std::endl; // re-generate the dataset files { std::ifstream file(train_file_list_name); if (file.good()) { std::remove(train_file_list_name); } } { std::ifstream file(test_file_list_name); if (file.good()) { std::remove(test_file_list_name); } } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batchsize * train_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batchsize * test_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batchsize * train_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batchsize * test_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot); } } #ifdef ENABLE_MPI 
MPI_Barrier(MPI_COMM_WORLD); std::cout << "This is rank: " << resource_manager->get_process_id() << std::endl; #endif // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); std::unique_ptr<DataReader<T>> test_data_reader(new DataReader<T>( test_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads)); test_data_reader->create_drwg_norm(test_file_list_name, CHK); slot_sizes.clear(); // don't init hashtable when doing training correctness checking. // Because we will upload hashtable to GPUs. // generate hashtable if (resource_manager->is_master_process()) { std::cout << "Init hash table"; // init hash table file: <key, solt_id, value> std::ofstream fs(hash_table_file_name); if (!fs.is_open()) { ERROR_MESSAGE_("Error: file not open for writing"); } // UnifiedDataSimulator<T> ldata_sim(0, slot_num-1); // for slot_id test::UniformDataSimulator fdata_sim; // for value std::unique_ptr<float[]> buf(new float[embedding_vec_size]); for (long long i = 0; i < vocabulary_size; i++) { T key = (T)i; // T key = ldata_sim.get_num(); // CAUSION: can not set random keys here, because we need to ensure that: // 1) we can find keys in the data file from this hash table // 2) there are no repeated keys fs.write((char *)&key, sizeof(T)); T slot_id; if (slot_sizes.size() == 0) { slot_id = key % slot_num; // CAUSION: need to dedicate the slot_id for each key for // correctness verification } else { size_t offset = 0; for (size_t j = 0; j < slot_sizes.size(); j++) { if ((key >= static_cast<T>(offset)) && (key < static_cast<T>(offset + slot_sizes[j]))) { slot_id = (T)j; break; } offset += slot_sizes[j]; } } fs.write((char *)&slot_id, sizeof(T)); fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f); fs.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float)); } fs.close(); std::cout << " Done" << std::endl; } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif const SparseEmbeddingHashParams<TypeEmbeddingComp> embedding_params = { train_batchsize, test_batchsize, vocabulary_size, {}, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>( bags_to_tensors<T>(train_data_reader->get_row_offsets_tensors()), bags_to_tensors<T>(train_data_reader->get_value_tensors()), train_data_reader->get_nnz_array(), bags_to_tensors<T>(test_data_reader->get_row_offsets_tensors()), bags_to_tensors<T>(test_data_reader->get_value_tensors()), test_data_reader->get_nnz_array(), embedding_params, plan_file, resource_manager)); { // upload hash table to device std::ifstream fs(hash_table_file_name); embedding->load_parameters(fs); fs.close(); } // for SparseEmbeddingCpu std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, opt_params, train_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized)); TypeEmbeddingComp 
*embedding_feature_from_cpu = embedding_cpu->get_forward_results(); TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results(); T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr(); float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr(); // for results check std::shared_ptr<GeneralBuffer2<HostAllocator>> buf = GeneralBuffer2<HostAllocator>::create(); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu); Tensor2<TypeEmbeddingComp> wgrad_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &wgrad_from_gpu); Tensor2<T> hash_table_key_from_gpu; buf->reserve({vocabulary_size}, &hash_table_key_from_gpu); Tensor2<float> hash_table_value_from_gpu; buf->reserve({vocabulary_size * embedding_vec_size}, &hash_table_value_from_gpu); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu_eval; buf->reserve({test_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu_eval); buf->allocate(); typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue; for (int i = 0; i < train_batch_num; i++) { printf("Rank%d: Round %d start training:\n", resource_manager->get_process_id(), i); // call read a batch printf("Rank%d: data_reader->read_a_batch_to_device()\n", resource_manager->get_process_id()); train_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding->forward()\n", resource_manager->get_process_id()); embedding->forward(true); // check the result of forward printf("Rank%d: embedding->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(true, embedding_feature_from_gpu); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu->forward()\n"); embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu.get_ptr(), embedding_feature_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU backward printf("Rank%d: embedding->backward()\n", resource_manager->get_process_id()); embedding->backward(); // check the result of backward printf("Rank%d: embedding->get_backward_results()\n", resource_manager->get_process_id()); embedding->get_backward_results(wgrad_from_gpu, 0); if (resource_manager->is_master_process()) { // CPU backward printf("Rank0: embedding_cpu->backward()\n"); embedding_cpu->backward(); printf("Rank0: check backward results: GPU and CPU\n"); ASSERT_TRUE(compare_wgrad(train_batchsize * slot_num * embedding_vec_size, wgrad_from_gpu.get_ptr(), wgrad_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU update_params printf("Rank%d: embedding->update_params()\n", resource_manager->get_process_id()); embedding->update_params(); // check the results of update params printf("Rank%d: embedding->get_update_params_results()\n", resource_manager->get_process_id()); embedding->get_update_params_results(hash_table_key_from_gpu, hash_table_value_from_gpu); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU update_params printf("Rank0: embedding_cpu->update_params()\n"); embedding_cpu->update_params(); printf("Rank0: check update_params results\n"); ASSERT_TRUE(compare_hash_table( vocabulary_size, hash_table_key_from_gpu.get_ptr(), reinterpret_cast<TypeHashValue 
*>(hash_table_value_from_gpu.get_ptr()), hash_table_key_from_cpu, reinterpret_cast<TypeHashValue *>(hash_table_value_from_cpu), tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // create new obj for eval() { std::ofstream fs(hash_table_file_name); embedding->dump_parameters(fs); fs.close(); } // for SparseEmbeddingCpu eval std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, test_batch_num * test_batchsize, combiner, opt_params, test_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized)); TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results(); ///////////////////////////////////////////////////////////////////////////////////////////// // eval { printf("\nRank%d: Round start eval:\n", resource_manager->get_process_id()); // call read a batch printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", resource_manager->get_process_id()); test_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding_eval->forward()\n", resource_manager->get_process_id()); embedding->forward(false); // check the result of forward printf("Rank%d: embedding_eval->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(false, embedding_feature_from_gpu_eval); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu_eval->forward()\n"); test_embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu_eval.get_ptr(), embedding_feature_from_cpu_eval, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif printf("Rank%d: Round end:\n", resource_manager->get_process_id()); } test::mpi_finalize(); } } // namespace TEST(localized_sparse_embedding_hash_test, fp32_sgd_1gpu) { train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_sgd_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_sgd_global_update_1gpu) { train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_sgd_global_update_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_1gpu) { train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_global_update_1gpu) { train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_global_update_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_adam_1gpu) { train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_adam_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, 
Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_adam_global_update_1gpu) { train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_adam_global_update_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_1gpu) { train_and_test<float>({0}, Optimizer_t::Adam, Update_t::LazyGlobal); } TEST(localized_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal); } TEST(localized_sparse_embedding_hash_test, fp16_adam_1gpu) { train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_adam_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_adam_global_update_1gpu) { train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_adam_global_update_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_1gpu) { train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::LazyGlobal); } TEST(localized_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal); }
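/* --------------------------------------------------------------------------
 * Editorial sketch (not a HugeCTR API).  The hash-table initialization in the
 * test above assigns each key a slot id either as key % slot_num (the uniform
 * case) or by locating the key inside the running prefix sums of slot_sizes.
 * The same logic, pulled out as a standalone helper for clarity; names are
 * illustrative only.
 * -------------------------------------------------------------------------- */
#include <cstddef>
#include <vector>

template <typename T>
T slot_id_of_key(T key, int slot_num, const std::vector<size_t> &slot_sizes) {
  if (slot_sizes.empty()) {
    return key % slot_num;  // uniform layout, as used when slot_sizes is not set
  }
  size_t offset = 0;
  for (size_t j = 0; j < slot_sizes.size(); ++j) {
    if (key >= static_cast<T>(offset) && key < static_cast<T>(offset + slot_sizes[j])) {
      return static_cast<T>(j);  // key falls inside slot j's contiguous range
    }
    offset += slot_sizes[j];
  }
  return static_cast<T>(slot_sizes.size() - 1);  // fallback; the original assumes a hit
}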
9b90c8b37b670a854103cee42c22048e83ead291.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <sys/time.h> #include <fstream> #include <functional> #include "HugeCTR/include/data_generator.hpp" #include "HugeCTR/include/data_readers/data_reader.hpp" #include "HugeCTR/include/embeddings/localized_slot_sparse_embedding_hash.hpp" #include "gtest/gtest.h" #include "nvToolsExt.h" #include "utest/embedding/embedding_test_utils.hpp" #include "utest/embedding/sparse_embedding_hash_cpu.hpp" #include "utest/test_utils.h" using namespace HugeCTR; using namespace embedding_test; namespace { //--------------------------------------------------------------------------------------- // global params for all testing const int train_batch_num = 10; // can not more than 32 const int test_batch_num = 1; const int train_batchsize = 1024; const int test_batchsize = 2560; const int slot_num = 26; const int max_nnz_per_slot = 1; const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample const long long vocabulary_size = slot_num * 100; const int embedding_vec_size = 128; const int combiner = 0; // 0-sum, 1-mean const long long label_dim = 1; const long long dense_dim = 0; typedef long long T; const float scaler = 1.0f; // used in mixed precision training // In order to not allocate the total size of hash table on each GPU, the users need to set the // size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count, // eg: 1.25x of that. 
const int num_chunk_threads = 1; // must be 1 for CPU and GPU results comparation const int num_files = 1; const Check_t CHK = Check_t::Sum; // Check_t::Sum const char *train_file_list_name = "train_file_list.txt"; const char *test_file_list_name = "test_file_list.txt"; const char *prefix = "./data_reader_test_data/temp_dataset_"; #ifndef NCCl_A2A const std::string plan_file(PROJECT_HOME_ + "utest/all2all_plan_dgx_{0,1,2,3,4,5,6,7}.json"); #else const std::string plan_file = ""; #endif const char *hash_table_file_name = "localized_hash_table.bin"; std::vector<size_t> slot_sizes; // null means use vocabulary_size/gpu_count/load_factor as // max_vocabulary_size_per_gpu // CAUSION: must match vocabulary_size // std::vector<size_t> slot_sizes = {39884406,39043,17289,7420,20263,3,7120,1543,63,38532951, // 2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36}; // // for cretio dataset // std::vector<size_t> slot_sizes = // {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}; // // just for verify //----------------------------------------------------------------------------------------- template <typename TypeEmbeddingComp> void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer, const Update_t &update_type) { OptHyperParams<TypeEmbeddingComp> hyper_params; hyper_params.adam.beta1 = 0.9f; hyper_params.adam.beta2 = 0.999f; float tolerance; if (std::is_same<TypeEmbeddingComp, __half>::value) { hyper_params.adam.epsilon = 1e-4f; tolerance = 5e-3f; } else { hyper_params.adam.epsilon = 1e-7f; tolerance = 1e-4f; } hyper_params.momentum.factor = 0.9f; hyper_params.nesterov.mu = 0.9f; const float lr = optimizer == Optimizer_t::Adam ? 0.001f : 0.01f; const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, update_type, scaler}; test::mpi_init(); int numprocs = 1; #ifdef ENABLE_MPI MPI_Comm_size(MPI_COMM_WORLD, &numprocs); #endif // if there are multi-node, we assume each node has the same gpu device_list std::vector<std::vector<int>> vvgpu; for (int i = 0; i < numprocs; i++) { vvgpu.push_back(device_list); } const auto &resource_manager = ResourceManager::create(vvgpu, 0); if (resource_manager->is_master_process()) { std::cout << "rank " << resource_manager->get_process_id() << " is generating data" << std::endl; // re-generate the dataset files { std::ifstream file(train_file_list_name); if (file.good()) { std::remove(train_file_list_name); } } { std::ifstream file(test_file_list_name); if (file.good()) { std::remove(test_file_list_name); } } // data generation: key's corresponding slot_id=(key%slot_num) if (slot_sizes.size() > 0) { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batchsize * train_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batchsize * test_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot, slot_sizes); } else { HugeCTR::data_generation_for_localized_test<T, CHK>( train_file_list_name, prefix, num_files, train_batchsize * train_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot); HugeCTR::data_generation_for_localized_test<T, CHK>( test_file_list_name, prefix, num_files, test_batchsize * test_batch_num, slot_num, vocabulary_size, label_dim, dense_dim, max_nnz_per_slot); } } #ifdef ENABLE_MPI 
MPI_Barrier(MPI_COMM_WORLD); std::cout << "This is rank: " << resource_manager->get_process_id() << std::endl; #endif // setup a data reader const DataReaderSparseParam param = {DataReaderSparse_t::Localized, max_nnz_per_slot * slot_num, max_nnz_per_slot, slot_num}; std::vector<DataReaderSparseParam> params; params.push_back(param); std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>( train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads)); train_data_reader->create_drwg_norm(train_file_list_name, CHK); std::unique_ptr<DataReader<T>> test_data_reader(new DataReader<T>( test_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads)); test_data_reader->create_drwg_norm(test_file_list_name, CHK); slot_sizes.clear(); // don't init hashtable when doing training correctness checking. // Because we will upload hashtable to GPUs. // generate hashtable if (resource_manager->is_master_process()) { std::cout << "Init hash table"; // init hash table file: <key, solt_id, value> std::ofstream fs(hash_table_file_name); if (!fs.is_open()) { ERROR_MESSAGE_("Error: file not open for writing"); } // UnifiedDataSimulator<T> ldata_sim(0, slot_num-1); // for slot_id test::UniformDataSimulator fdata_sim; // for value std::unique_ptr<float[]> buf(new float[embedding_vec_size]); for (long long i = 0; i < vocabulary_size; i++) { T key = (T)i; // T key = ldata_sim.get_num(); // CAUSION: can not set random keys here, because we need to ensure that: // 1) we can find keys in the data file from this hash table // 2) there are no repeated keys fs.write((char *)&key, sizeof(T)); T slot_id; if (slot_sizes.size() == 0) { slot_id = key % slot_num; // CAUSION: need to dedicate the slot_id for each key for // correctness verification } else { size_t offset = 0; for (size_t j = 0; j < slot_sizes.size(); j++) { if ((key >= static_cast<T>(offset)) && (key < static_cast<T>(offset + slot_sizes[j]))) { slot_id = (T)j; break; } offset += slot_sizes[j]; } } fs.write((char *)&slot_id, sizeof(T)); fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f); fs.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float)); } fs.close(); std::cout << " Done" << std::endl; } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif const SparseEmbeddingHashParams<TypeEmbeddingComp> embedding_params = { train_batchsize, test_batchsize, vocabulary_size, {}, embedding_vec_size, max_feature_num, slot_num, combiner, opt_params}; std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding( new LocalizedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>( bags_to_tensors<T>(train_data_reader->get_row_offsets_tensors()), bags_to_tensors<T>(train_data_reader->get_value_tensors()), train_data_reader->get_nnz_array(), bags_to_tensors<T>(test_data_reader->get_row_offsets_tensors()), bags_to_tensors<T>(test_data_reader->get_value_tensors()), test_data_reader->get_nnz_array(), embedding_params, plan_file, resource_manager)); { // upload hash table to device std::ifstream fs(hash_table_file_name); embedding->load_parameters(fs); fs.close(); } // for SparseEmbeddingCpu std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, opt_params, train_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized)); TypeEmbeddingComp 
*embedding_feature_from_cpu = embedding_cpu->get_forward_results(); TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results(); T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr(); float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr(); // for results check std::shared_ptr<GeneralBuffer2<HostAllocator>> buf = GeneralBuffer2<HostAllocator>::create(); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu); Tensor2<TypeEmbeddingComp> wgrad_from_gpu; buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &wgrad_from_gpu); Tensor2<T> hash_table_key_from_gpu; buf->reserve({vocabulary_size}, &hash_table_key_from_gpu); Tensor2<float> hash_table_value_from_gpu; buf->reserve({vocabulary_size * embedding_vec_size}, &hash_table_value_from_gpu); Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu_eval; buf->reserve({test_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu_eval); buf->allocate(); typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue; for (int i = 0; i < train_batch_num; i++) { printf("Rank%d: Round %d start training:\n", resource_manager->get_process_id(), i); // call read a batch printf("Rank%d: data_reader->read_a_batch_to_device()\n", resource_manager->get_process_id()); train_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding->forward()\n", resource_manager->get_process_id()); embedding->forward(true); // check the result of forward printf("Rank%d: embedding->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(true, embedding_feature_from_gpu); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu->forward()\n"); embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu.get_ptr(), embedding_feature_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU backward printf("Rank%d: embedding->backward()\n", resource_manager->get_process_id()); embedding->backward(); // check the result of backward printf("Rank%d: embedding->get_backward_results()\n", resource_manager->get_process_id()); embedding->get_backward_results(wgrad_from_gpu, 0); if (resource_manager->is_master_process()) { // CPU backward printf("Rank0: embedding_cpu->backward()\n"); embedding_cpu->backward(); printf("Rank0: check backward results: GPU and CPU\n"); ASSERT_TRUE(compare_wgrad(train_batchsize * slot_num * embedding_vec_size, wgrad_from_gpu.get_ptr(), wgrad_from_cpu, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif // GPU update_params printf("Rank%d: embedding->update_params()\n", resource_manager->get_process_id()); embedding->update_params(); // check the results of update params printf("Rank%d: embedding->get_update_params_results()\n", resource_manager->get_process_id()); embedding->get_update_params_results(hash_table_key_from_gpu, hash_table_value_from_gpu); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU update_params printf("Rank0: embedding_cpu->update_params()\n"); embedding_cpu->update_params(); printf("Rank0: check update_params results\n"); ASSERT_TRUE(compare_hash_table( vocabulary_size, hash_table_key_from_gpu.get_ptr(), reinterpret_cast<TypeHashValue 
*>(hash_table_value_from_gpu.get_ptr()), hash_table_key_from_cpu, reinterpret_cast<TypeHashValue *>(hash_table_value_from_cpu), tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // create new obj for eval() { std::ofstream fs(hash_table_file_name); embedding->dump_parameters(fs); fs.close(); } // for SparseEmbeddingCpu eval std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu( new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>( test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim, dense_dim, CHK, test_batch_num * test_batchsize, combiner, opt_params, test_file_list_name, hash_table_file_name, SparseEmbedding_t::Localized)); TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results(); ///////////////////////////////////////////////////////////////////////////////////////////// // eval { printf("\nRank%d: Round start eval:\n", resource_manager->get_process_id()); // call read a batch printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", resource_manager->get_process_id()); test_data_reader->read_a_batch_to_device(); // GPU forward printf("Rank%d: embedding_eval->forward()\n", resource_manager->get_process_id()); embedding->forward(false); // check the result of forward printf("Rank%d: embedding_eval->get_forward_results()\n", resource_manager->get_process_id()); embedding->get_forward_results(false, embedding_feature_from_gpu_eval); // memcpy from GPU to CPU if (resource_manager->is_master_process()) { // CPU forward printf("Rank0: embedding_cpu_eval->forward()\n"); test_embedding_cpu->forward(); printf("Rank0: check forward results\n"); ASSERT_TRUE(compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size, embedding_feature_from_gpu_eval.get_ptr(), embedding_feature_from_cpu_eval, tolerance)); } #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif printf("Rank%d: Round end:\n", resource_manager->get_process_id()); } test::mpi_finalize(); } } // namespace TEST(localized_sparse_embedding_hash_test, fp32_sgd_1gpu) { train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_sgd_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_sgd_global_update_1gpu) { train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_sgd_global_update_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_1gpu) { train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_global_update_1gpu) { train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_sgd_global_update_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_adam_1gpu) { train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_adam_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, 
Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp32_adam_global_update_1gpu) { train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_adam_global_update_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_1gpu) { train_and_test<float>({0}, Optimizer_t::Adam, Update_t::LazyGlobal); } TEST(localized_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_8gpu) { train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal); } TEST(localized_sparse_embedding_hash_test, fp16_adam_1gpu) { train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_adam_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Local); } TEST(localized_sparse_embedding_hash_test, fp16_adam_global_update_1gpu) { train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_adam_global_update_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global); } TEST(localized_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_1gpu) { train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::LazyGlobal); } TEST(localized_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_8gpu) { train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal); }
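/* --------------------------------------------------------------------------
 * Editorial sketch (illustrative names, not HugeCTR code).  The test above
 * relaxes both the Adam epsilon and the GPU-vs-CPU comparison tolerance when
 * TypeEmbeddingComp is __half, since half-precision accumulation cannot be
 * expected to track the fp32 CPU reference as tightly.  The selection pattern
 * in isolation:
 * -------------------------------------------------------------------------- */
#include <cuda_fp16.h>
#include <type_traits>

template <typename TypeEmbeddingComp>
struct PrecisionParams {
  static constexpr bool  is_half      = std::is_same<TypeEmbeddingComp, __half>::value;
  static constexpr float adam_epsilon = is_half ? 1e-4f : 1e-7f;  // looser epsilon for fp16
  static constexpr float tolerance    = is_half ? 5e-3f : 1e-4f;  // looser check for fp16
};
// e.g. PrecisionParams<__half>::tolerance == 5e-3f, PrecisionParams<float>::tolerance == 1e-4f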
bb400b17ccef989ee096a5b4d8d558b873e022cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_b; int xdim0_update_halo_kernel4_plus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_b; int ydim0_update_halo_kernel4_plus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_b; int xdim1_update_halo_kernel4_plus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_b; int ydim1_update_halo_kernel4_plus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_b*(y)+xdim0_update_halo_kernel4_plus_2_b*ydim0_update_halo_kernel4_plus_2_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_b*(y)+xdim1_update_halo_kernel4_plus_2_b*ydim1_update_halo_kernel4_plus_2_b*(z)) //user function __device__ inline void update_halo_kernel4_plus_2_b_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(-2,0,0)]; if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(-2,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_b + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_b + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_b_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,78)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(78,"update_halo_kernel4_plus_2_b"); OPS_kernels[78].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = 
MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h || ydim0 != ydim0_update_halo_kernel4_plus_2_b_h || xdim1 != xdim1_update_halo_kernel4_plus_2_b_h || ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_plus_2_b_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_plus_2_b_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_plus_2_b_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_plus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[78].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[78].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[78].mpi_time += t2-t1; OPS_kernels[78].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[78].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = 
block; desc->dim = dim; desc->device = 1; desc->index = 78; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 78; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_b_execute; if (OPS_diags > 1) { ops_timing_realloc(78,"update_halo_kernel4_plus_2_b"); } ops_enqueue_kernel(desc); } #endif
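/* --------------------------------------------------------------------------
 * Editorial sketch (not generated OPS code).  The OPS_ACC macros and the
 * per-thread pointer bumps in the kernel above both encode the same flattening
 * of a 3-D block: index = x + xdim*y + xdim*ydim*z.  Written out as a plain
 * device helper plus a trivial kernel; it assumes the buffer holds at least
 * xdim*ydim*size2 doubles with size0 <= xdim and size1 <= ydim.
 * -------------------------------------------------------------------------- */
#include <hip/hip_runtime.h>

__device__ __forceinline__ size_t flat3d(int x, int y, int z, int xdim, int ydim)
{
  /* matches OPS_ACC0(x,y,z) = x + xdim*(y) + xdim*ydim*(z) */
  return (size_t)x + (size_t)xdim * y + (size_t)xdim * ydim * z;
}

__global__ void fill3d(double *a, double value, int xdim, int ydim,
                       int size0, int size1, int size2)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x < size0 && y < size1 && z < size2)      /* same guard shape as the generated kernel */
    a[flat3d(x, y, z, xdim, ydim)] = value;
}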
bb400b17ccef989ee096a5b4d8d558b873e022cb.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_b; int xdim0_update_halo_kernel4_plus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_b; int ydim0_update_halo_kernel4_plus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_b; int xdim1_update_halo_kernel4_plus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_b; int ydim1_update_halo_kernel4_plus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_b*(y)+xdim0_update_halo_kernel4_plus_2_b*ydim0_update_halo_kernel4_plus_2_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_b*(y)+xdim1_update_halo_kernel4_plus_2_b*ydim1_update_halo_kernel4_plus_2_b*(z)) //user function __device__ inline void update_halo_kernel4_plus_2_b_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(-2,0,0)]; if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(-2,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_b + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_b + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_b_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,78)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(78,"update_halo_kernel4_plus_2_b"); OPS_kernels[78].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int 
xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h || ydim0 != ydim0_update_halo_kernel4_plus_2_b_h || xdim1 != xdim1_update_halo_kernel4_plus_2_b_h || ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_plus_2_b_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_plus_2_b_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_plus_2_b_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_plus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[78].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_plus_2_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[78].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[78].mpi_time += t2-t1; OPS_kernels[78].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[78].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 78; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 78; 
for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_b_execute; if (OPS_diags > 1) { ops_timing_realloc(78,"update_halo_kernel4_plus_2_b"); } ops_enqueue_kernel(desc); } #endif
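/* --------------------------------------------------------------------------
 * Editorial sketch (illustrative names).  The generated host stub above avoids
 * redundant cudaMemcpyToSymbol traffic by shadowing each __constant__ extent
 * with a host-side *_h copy and re-uploading only when the dat sizes change.
 * The caching pattern in isolation:
 * -------------------------------------------------------------------------- */
#include <cuda_runtime.h>

__constant__ int xdim0_example;
static int xdim0_example_h = -1;   /* host shadow; -1 means "never uploaded" */

static void set_xdim0_if_changed(int xdim0)
{
  if (xdim0 != xdim0_example_h) {
    cudaMemcpyToSymbol(xdim0_example, &xdim0, sizeof(int));  /* upload to constant memory */
    xdim0_example_h = xdim0;                                 /* remember what the device holds */
  }
}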
a2d6eaecd541ed92e69848c5f1100ffd71e59df5.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <vector_functions.h> #include <nori/scene.h> #include <nori/camera.h> #include <hip/device_functions.h> #include <nori/independent.h> #include <nori/perspective.h> #include <nori/integrator.h> #include <nori/pathMisIntegrator.h> #include <nori/bvh.h> #include <chrono> #include <unistd.h> NORI_NAMESPACE_BEGIN #define IDX2(i,j,N) (((i)*(N))+(j)) #define COL_NORMSQ(c) (c.x()*c.x() + c.y()*c.y() + c.z()*c.z()) __global__ void copy_surface_to_data(hipSurfaceObject_t cuda_data, int width, int height, Color3f* data) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) { for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) { float4 val; surf2Dread(&val, cuda_data,(int) sizeof(float4) * x, height - y,hipBoundaryModeClamp); data[IDX2(x,y,height)] = Color3f(val.x, val.y, val.z); } } } __global__ void copy_weighted_data_to_surface(hipSurfaceObject_t cuda_data, Color3f* image, int width, int height, Color3f* flt, float* wgtsum) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) { for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) { int idx = IDX2(x,y,height); Color3f val = flt[idx] / wgtsum[idx]; image[idx] = val; val = val.toLinearRGB(); surf2Dwrite(make_float4(val.x(),val.y(),val.z(),1.0f), cuda_data, (int) sizeof(float4) * x, height - y, hipBoundaryModeClamp); } } } // see lecture 16a-slide 16 __global__ void nl_means_filter_gpu(Color3f* image, float* wgtsum, Color3f* flt, int width, int height, Scene* scene) { const int r = scene->filter_r; const int f = scene->filter_f; const float sigma = scene->filter_sigma; const float sigma2 = sigma*sigma; //const float h2=0.45f*0.45f; const float h = scene->filter_h; const float h2=h*h; for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) { for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) { // loop over neighbors int minX = max(0,x-r); int maxX = min(width-1,x+r); int minY = max(0,y-r); int maxY = min(height-1,y+r); for (int i=minX; i<=maxX; ++i) { for (int j=minY; j<=maxY; ++j) { // here we consider neighbour (i,j) float d2patch = 0; int patchMinX = max(max(-f, -x), -i); int patchMaxX = min(min(f, width-x-1), width-i-1); int patchMinY = max(max(-f,-y), -j); int patchMaxY = min(min(f, height-y-1), height-j-1); // loop over patch of size (2f+1)^2 for (int k=patchMinX;k<=patchMaxX;++k) { for (int l=patchMinY;l<=patchMaxY;++l) { Color3f color_p = image[IDX2(x+k,y+l,height)]; Color3f color_q = image[IDX2(i+k,j+l,height)]; float w = exp(-(k*k+l*l)/(2.0f*sigma2)); float patchv = (COL_NORMSQ((color_q-color_p).eval()))/h2; d2patch += w*patchv; } } d2patch /= ((patchMaxX-patchMinX+1)*(patchMaxY-patchMinY+1)); float wgt = exp(-max(d2patch,0.0f)); int idx = IDX2(x,y,height); wgtsum[idx] += wgt; flt[idx] += wgt*image[IDX2(i,j,height)]; } } } } } /** * * @param resource * @param w * @param h * @param scene scene object on GPU memory */ static float filterInvoked = false; static std::chrono::milliseconds filterStartTime; static float* filterWeights; static Color3f* filterOut; void filter_scene(hipSurfaceObject_t resource, int w, int h, nori::Scene *scene,Color3f *image) { if (!filterInvoked) { int blockSize; int gridSize; hipOccupancyMaxPotentialBlockSize(&gridSize,&blockSize,nl_means_filter_gpu,0,w*h); //we want to render 2D 
blocks not lines int blockW = sqrt(blockSize); int blockH = blockSize/blockW; dim3 block(blockW,blockH); int gridSizeW = (w + blockW - 1) / blockW; int gridSizeH = (h + blockH - 1) / blockH; dim3 grid(gridSizeW,gridSizeH); //hipMalloc((void **) &filterImage, w * h * sizeof(Color3f)); //hipMemset(filterImage, 0, w * h * sizeof(Color3f)); hipMalloc((void **) &filterWeights, w * h * sizeof(float)); hipMemset(filterWeights, 0, w * h * sizeof(float)); hipMalloc((void **) &filterOut, w * h * sizeof(Color3f)); hipMemset(filterOut, 0, w * h * sizeof(Color3f)); filterStartTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()); // copy resource to image //std::cout<<"filter copy start"<<std::endl; //nori::copy_surface_to_data << < grid,block >> > (resource, w,h, image); //std::cout<<hipGetErrorString(hipGetLastError())<<std::endl; // wait until finish hipDeviceSynchronize(); // actual filter std::cout<<"filter start"<<std::endl; nori::nl_means_filter_gpu << < grid,block >> > (image, filterWeights, filterOut, w, h, scene); std::cout<<hipGetErrorString(hipGetLastError())<<std::endl; // wait until finish hipDeviceSynchronize(); std::cout<<"filter copy back start"<<std::endl; nori::copy_weighted_data_to_surface << < grid,block >> > (resource, image, w,h, filterOut, filterWeights); std::cout<<hipGetErrorString(hipGetLastError())<<std::endl; filterInvoked = true; } if (hipSuccess==hipStreamQuery(0)){ hipDeviceSynchronize(); auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - filterStartTime; std::cout << "filter finished: Took" << float(diff.count())/1000 << " seconds!"<< std::endl ; } } NORI_NAMESPACE_END
a2d6eaecd541ed92e69848c5f1100ffd71e59df5.cu
#include <cuda_runtime_api.h> #include <vector_functions.h> #include <nori/scene.h> #include <nori/camera.h> #include <device_functions.h> #include <nori/independent.h> #include <nori/perspective.h> #include <nori/integrator.h> #include <nori/pathMisIntegrator.h> #include <nori/bvh.h> #include <chrono> #include <unistd.h> NORI_NAMESPACE_BEGIN #define IDX2(i,j,N) (((i)*(N))+(j)) #define COL_NORMSQ(c) (c.x()*c.x() + c.y()*c.y() + c.z()*c.z()) __global__ void copy_surface_to_data(cudaSurfaceObject_t cuda_data, int width, int height, Color3f* data) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) { for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) { float4 val; surf2Dread(&val, cuda_data,(int) sizeof(float4) * x, height - y,cudaBoundaryModeClamp); data[IDX2(x,y,height)] = Color3f(val.x, val.y, val.z); } } } __global__ void copy_weighted_data_to_surface(cudaSurfaceObject_t cuda_data, Color3f* image, int width, int height, Color3f* flt, float* wgtsum) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) { for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) { int idx = IDX2(x,y,height); Color3f val = flt[idx] / wgtsum[idx]; image[idx] = val; val = val.toLinearRGB(); surf2Dwrite(make_float4(val.x(),val.y(),val.z(),1.0f), cuda_data, (int) sizeof(float4) * x, height - y, cudaBoundaryModeClamp); } } } // see lecture 16a-slide 16 __global__ void nl_means_filter_gpu(Color3f* image, float* wgtsum, Color3f* flt, int width, int height, Scene* scene) { const int r = scene->filter_r; const int f = scene->filter_f; const float sigma = scene->filter_sigma; const float sigma2 = sigma*sigma; //const float h2=0.45f*0.45f; const float h = scene->filter_h; const float h2=h*h; for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < width; x += blockDim.x * gridDim.x) { for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < height; y += blockDim.y * gridDim.y) { // loop over neighbors int minX = max(0,x-r); int maxX = min(width-1,x+r); int minY = max(0,y-r); int maxY = min(height-1,y+r); for (int i=minX; i<=maxX; ++i) { for (int j=minY; j<=maxY; ++j) { // here we consider neighbour (i,j) float d2patch = 0; int patchMinX = max(max(-f, -x), -i); int patchMaxX = min(min(f, width-x-1), width-i-1); int patchMinY = max(max(-f,-y), -j); int patchMaxY = min(min(f, height-y-1), height-j-1); // loop over patch of size (2f+1)^2 for (int k=patchMinX;k<=patchMaxX;++k) { for (int l=patchMinY;l<=patchMaxY;++l) { Color3f color_p = image[IDX2(x+k,y+l,height)]; Color3f color_q = image[IDX2(i+k,j+l,height)]; float w = exp(-(k*k+l*l)/(2.0f*sigma2)); float patchv = (COL_NORMSQ((color_q-color_p).eval()))/h2; d2patch += w*patchv; } } d2patch /= ((patchMaxX-patchMinX+1)*(patchMaxY-patchMinY+1)); float wgt = exp(-max(d2patch,0.0f)); int idx = IDX2(x,y,height); wgtsum[idx] += wgt; flt[idx] += wgt*image[IDX2(i,j,height)]; } } } } } /** * * @param resource * @param w * @param h * @param scene scene object on GPU memory */ static float filterInvoked = false; static std::chrono::milliseconds filterStartTime; static float* filterWeights; static Color3f* filterOut; void filter_scene(cudaSurfaceObject_t resource, int w, int h, nori::Scene *scene,Color3f *image) { if (!filterInvoked) { int blockSize; int gridSize; cudaOccupancyMaxPotentialBlockSize(&gridSize,&blockSize,nl_means_filter_gpu,0,w*h); //we want to render 2D blocks not lines int blockW = sqrt(blockSize); int blockH = 
blockSize/blockW; dim3 block(blockW,blockH); int gridSizeW = (w + blockW - 1) / blockW; int gridSizeH = (h + blockH - 1) / blockH; dim3 grid(gridSizeW,gridSizeH); //cudaMalloc((void **) &filterImage, w * h * sizeof(Color3f)); //cudaMemset(filterImage, 0, w * h * sizeof(Color3f)); cudaMalloc((void **) &filterWeights, w * h * sizeof(float)); cudaMemset(filterWeights, 0, w * h * sizeof(float)); cudaMalloc((void **) &filterOut, w * h * sizeof(Color3f)); cudaMemset(filterOut, 0, w * h * sizeof(Color3f)); filterStartTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()); // copy resource to image //std::cout<<"filter copy start"<<std::endl; //nori::copy_surface_to_data << < grid,block >> > (resource, w,h, image); //std::cout<<cudaGetErrorString(cudaGetLastError())<<std::endl; // wait until finish cudaDeviceSynchronize(); // actual filter std::cout<<"filter start"<<std::endl; nori::nl_means_filter_gpu << < grid,block >> > (image, filterWeights, filterOut, w, h, scene); std::cout<<cudaGetErrorString(cudaGetLastError())<<std::endl; // wait until finish cudaDeviceSynchronize(); std::cout<<"filter copy back start"<<std::endl; nori::copy_weighted_data_to_surface << < grid,block >> > (resource, image, w,h, filterOut, filterWeights); std::cout<<cudaGetErrorString(cudaGetLastError())<<std::endl; filterInvoked = true; } if (cudaSuccess==cudaStreamQuery(0)){ cudaDeviceSynchronize(); auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()) - filterStartTime; std::cout << "filter finished: Took" << float(diff.count())/1000 << " seconds!"<< std::endl ; } } NORI_NAMESPACE_END
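The pair above shows the main rewrite hipify performs: CUDA triple-chevron launches become hipLaunchKernelGGL calls, and cuda* runtime types and functions (cudaSurfaceObject_t, cudaDeviceSynchronize, cudaGetLastError, ...) become their hip* counterparts, while the kernel bodies stay untouched. The following minimal sketch, separate from either file, restates that launch-syntax mapping on a hypothetical kernel named scale; the kernel and its arguments are illustrative only.

#include <cuda_runtime.h>

// Hypothetical grid-stride kernel used only to illustrate the launch mapping.
__global__ void scale(float* data, int n, float s) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    data[i] *= s;
}

void launch_scale(float* d_data, int n, float s) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA source form, as in the .cu file above:
  //   scale<<<grid, block, 0, 0>>>(d_data, n, s);
  // HIP form emitted by hipify for the .hip file above:
  //   hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, n, s);
  scale<<<grid, block, 0, 0>>>(d_data, n, s);
}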
one_hot.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/layers/misc/one_hot.hpp" namespace lbann { namespace { /** * On input, output is assumed to be filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (width / bsize) x 1 x 1 */ __global__ void fp_kernel(size_t height, size_t width, const DataType* __restrict__ indices, size_t indices_stride, DataType* __restrict__ output, size_t output_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t col = gid; col < width; col += nthreads) { const auto& ind = indices[col*indices_stride]; if (DataType{0} <= ind && ind < DataType(height)) { const size_t row = static_cast<size_t>(ind); output[row+col*output_ldim] = DataType{1}; } } } } // namespace <anon> template <> void one_hot_layer<data_layout::DATA_PARALLEL, El::Device::GPU> ::fp_compute() { // Local matrices const auto& local_input = dynamic_cast<const GPUMat&>(get_local_prev_activations()); auto& local_output = dynamic_cast<GPUMat&>(get_local_activations()); // Populate one-hot vectors El::Zero(local_output); if (!local_output.IsEmpty()) { const size_t local_height = local_output.Height(); const size_t local_width = local_output.Width(); constexpr size_t block_size = 64; const size_t grid_size = (local_width + block_size - 1) / block_size; hipLaunchKernelGGL(( fp_kernel) , dim3(grid_size), dim3(block_size), 0, El::GPUManager::Stream(), local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim()); } } } // namespace lbann
one_hot.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/layers/misc/one_hot.hpp" namespace lbann { namespace { /** * On input, output is assumed to be filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (width / bsize) x 1 x 1 */ __global__ void fp_kernel(size_t height, size_t width, const DataType* __restrict__ indices, size_t indices_stride, DataType* __restrict__ output, size_t output_ldim) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t col = gid; col < width; col += nthreads) { const auto& ind = indices[col*indices_stride]; if (DataType{0} <= ind && ind < DataType(height)) { const size_t row = static_cast<size_t>(ind); output[row+col*output_ldim] = DataType{1}; } } } } // namespace <anon> template <> void one_hot_layer<data_layout::DATA_PARALLEL, El::Device::GPU> ::fp_compute() { // Local matrices const auto& local_input = dynamic_cast<const GPUMat&>(get_local_prev_activations()); auto& local_output = dynamic_cast<GPUMat&>(get_local_activations()); // Populate one-hot vectors El::Zero(local_output); if (!local_output.IsEmpty()) { const size_t local_height = local_output.Height(); const size_t local_width = local_output.Width(); constexpr size_t block_size = 64; const size_t grid_size = (local_width + block_size - 1) / block_size; fp_kernel <<<grid_size, block_size, 0, El::GPUManager::Stream()>>>( local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim()); } } } // namespace lbann
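The one_hot pair above keeps the kernel body identical and only rewrites the launch: the CUDA version uses fp_kernel<<<grid_size, block_size, 0, stream>>>(...), while the HIP version wraps the same call in hipLaunchKernelGGL with dim3(grid_size) and dim3(block_size). The kernel walks columns with a grid-stride loop and writes a single 1 into a zero-filled column-major matrix. Below is a self-contained sketch of that same pattern using raw float buffers instead of LBANN's GPUMat/El types; every name in it is illustrative rather than taken from LBANN.

#include <cuda_runtime.h>

// One-hot fill over a zero-initialized column-major matrix (leading dimension == height).
__global__ void one_hot_sketch(size_t height, size_t width,
                               const float* __restrict__ indices,
                               float* __restrict__ output) {
  const size_t gid = threadIdx.x + blockIdx.x * (size_t)blockDim.x;
  const size_t nthreads = (size_t)blockDim.x * gridDim.x;
  for (size_t col = gid; col < width; col += nthreads) {
    const float ind = indices[col];
    if (0.f <= ind && ind < (float)height) {
      output[(size_t)ind + col * height] = 1.f;  // assumes output was zeroed first
    }
  }
}

void run_one_hot_sketch(size_t height, size_t width,
                        const float* d_indices, float* d_output) {
  cudaMemset(d_output, 0, height * width * sizeof(float));  // analogous to El::Zero above
  const unsigned block_size = 64;
  const unsigned grid_size = (unsigned)((width + block_size - 1) / block_size);
  one_hot_sketch<<<grid_size, block_size>>>(height, width, d_indices, d_output);
}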
6eb8f2a3344f6b727ace68ddb2b3f234e6261404.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void getValue(float4 *outdata, float *indata) { // outdata[0] = indata[0]; float4 my4 = make_float4(indata[0], indata[3], indata[1], indata[2]); outdata[0] = my4; }
6eb8f2a3344f6b727ace68ddb2b3f234e6261404.cu
#include "includes.h" __global__ void getValue(float4 *outdata, float *indata) { // outdata[0] = indata[0]; float4 my4 = make_float4(indata[0], indata[3], indata[1], indata[2]); outdata[0] = my4; }
8391485ac83ede677fd9785cb4523b05093a3e05.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "predicateDevice.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_array = NULL; hipMalloc(&d_array, XSIZE*YSIZE); int *d_predicateArrry = NULL; hipMalloc(&d_predicateArrry, XSIZE*YSIZE); int d_numberOfElements = 1; int bit = 1; int bitset = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( predicateDevice), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,d_predicateArrry,d_numberOfElements,bit,bitset); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( predicateDevice), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,d_predicateArrry,d_numberOfElements,bit,bitset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( predicateDevice), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,d_predicateArrry,d_numberOfElements,bit,bitset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8391485ac83ede677fd9785cb4523b05093a3e05.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "predicateDevice.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *d_array = NULL; cudaMalloc(&d_array, XSIZE*YSIZE); int *d_predicateArrry = NULL; cudaMalloc(&d_predicateArrry, XSIZE*YSIZE); int d_numberOfElements = 1; int bit = 1; int bitset = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); predicateDevice<<<gridBlock,threadBlock>>>(d_array,d_predicateArrry,d_numberOfElements,bit,bitset); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { predicateDevice<<<gridBlock,threadBlock>>>(d_array,d_predicateArrry,d_numberOfElements,bit,bitset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { predicateDevice<<<gridBlock,threadBlock>>>(d_array,d_predicateArrry,d_numberOfElements,bit,bitset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
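The benchmark pair above follows a warm-up-then-time pattern: a few untimed launches, then 1000 timed launches bracketed by std::chrono::steady_clock reads. Note that the original reads the clock without synchronizing after the timed loop, so some kernel execution may still be in flight when the end timestamp is taken. The sketch below isolates that pattern with a hypothetical no-op kernel and adds a cudaDeviceSynchronize() before the final clock read so the measurement covers kernel execution as well; all names are illustrative.

#include <cuda_runtime.h>
#include <chrono>

__global__ void noop_kernel() {}  // stand-in for the kernel under test

// Returns the average per-launch time in milliseconds.
float time_kernel_ms(dim3 grid, dim3 block, int warmup = 10, int iters = 1000) {
  using namespace std::chrono;
  for (int i = 0; i < warmup; ++i) noop_kernel<<<grid, block>>>();
  cudaDeviceSynchronize();                                  // exclude warm-up work
  auto start = steady_clock::now();
  for (int i = 0; i < iters; ++i) noop_kernel<<<grid, block>>>();
  cudaDeviceSynchronize();                                  // wait for all timed launches
  auto end = steady_clock::now();
  return duration_cast<duration<float, std::milli>>(end - start).count() / iters;
}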
5f84ace8d846247d753f96425c249e3b377f7ebe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { using at::cuda::detail::TensorInfo; using at::cuda::detail::getTensorInfo; using at::cuda::detail::IndexToOffset; using at::cuda::detail::canUse32BitIndexMath; // Factor will be 3 for GRU and 4 for LSTM void checkSizes(CheckedFrom c, const TensorArg& input_gates, const TensorArg& hidden_gates, const TensorArg& input_bias, const TensorArg& hidden_bias, int64_t factor, const TensorArg& prev_hidden) { checkDim(c, input_gates, 2); checkSameSize(c, input_gates, hidden_gates); int64_t gates_size = input_gates->size(1); if (input_bias->defined()) { checkDim(c, input_bias, 1); checkNumel(c, input_bias, gates_size); checkSameSize(c, input_bias, hidden_bias); } checkDim(c, prev_hidden, 2); checkNumel(c, prev_hidden, input_gates->size(0) * gates_size / factor); checkAllSameGPU(c, {input_gates, hidden_gates, input_bias, hidden_bias, prev_hidden}); } bool allContiguous(at::TensorList tensors) { return std::all_of(tensors.begin(), tensors.end(), [](const at::Tensor& t) { return !t.defined() || t.is_contiguous(); }); } void getLaunchConfig(dim3* block, dim3* grid, int64_t numel) { int curDevice = -1; hipGetDevice(&curDevice); *block = cuda::getApplyBlock(); TORCH_INTERNAL_ASSERT(cuda::getApplyGrid(numel, *grid, curDevice), "Could not get grid size for pointwise apply."); } template<typename T, typename T2> TensorInfo<T, T2> tryGetTensorInfo(const at::Tensor& t) { return t.defined() ? getTensorInfo<T, T2>(t) : TensorInfo<T, T2>{}; } void collapseDims() {}; template<typename T, typename T2, typename... Args> void collapseDims(TensorInfo<T, T2>& info, Args&... 
infos) { info.collapseDims(); collapseDims(infos...); } #define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \ D_TENSOR.data[IndexToOffset<scalar_t, index_type, indexing_kind>::get(INDEX, D_TENSOR)] // Biases are always 1D #define DEVICE_BIAS_GET(D_TENSOR, INDEX) \ D_TENSOR.data[IndexToOffset<scalar_t, index_type, 1>::get(INDEX, D_TENSOR)] #define H2F(input) static_cast<accscalar_t>(input) #define F2H(input) static_cast<scalar_t>(input) template<typename T> __device__ __forceinline__ T sigmoid(T in) { T one = static_cast<T>(1.0); return one / (one + ::exp(-in)); } namespace kernel { template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void lstm_cell_forward( TensorInfo<scalar_t, index_type> input, TensorInfo<scalar_t, index_type> hidden, TensorInfo<scalar_t, index_type> bias1, TensorInfo<scalar_t, index_type> bias2, TensorInfo<scalar_t, index_type> _cx, TensorInfo<scalar_t, index_type> _hy, TensorInfo<scalar_t, index_type> _cy, TensorInfo<scalar_t, index_type> workspace, index_type hsz, index_type totalElements) { bool has_bias = bias1.data != nullptr; for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz; scalar_t iig = DEVICE_LINEAR_GET(input, offset+0*hsz); scalar_t ifg = DEVICE_LINEAR_GET(input, offset+1*hsz); scalar_t icg = DEVICE_LINEAR_GET(input, offset+2*hsz); scalar_t iog = DEVICE_LINEAR_GET(input, offset+3*hsz); scalar_t hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz); scalar_t hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz); scalar_t hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz); scalar_t hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz); scalar_t* wig = &DEVICE_LINEAR_GET(workspace, offset+0*hsz); scalar_t* wfg = &DEVICE_LINEAR_GET(workspace, offset+1*hsz); scalar_t* wcg = &DEVICE_LINEAR_GET(workspace, offset+2*hsz); scalar_t* wog = &DEVICE_LINEAR_GET(workspace, offset+3*hsz); scalar_t cx = DEVICE_LINEAR_GET(_cx, linearIndex); scalar_t* hy = &DEVICE_LINEAR_GET(_hy, linearIndex); scalar_t* cy = &DEVICE_LINEAR_GET(_cy, linearIndex); scalar_t b1i, b1f, b1c, b1o; scalar_t b2i, b2f, b2c, b2o; if (has_bias) { b1i = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 0 * hsz); b1f = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 1 * hsz); b1c = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 2 * hsz); b1o = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 3 * hsz); b2i = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 0 * hsz); b2f = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 1 * hsz); b2c = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 2 * hsz); b2o = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 3 * hsz); } else { #ifndef THC_REAL_IS_HALF b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0; b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0; #else b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0); b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0); #endif } accscalar_t ig, fg, cg, og; accscalar_t f_hy, f_cy; ig = sigmoid(H2F(iig) + H2F(hig) + H2F(b1i) + H2F(b2i)); fg = sigmoid(H2F(ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f)); cg = ::tanh(H2F(icg) + H2F(hcg) + H2F(b1c) + H2F(b2c)); og = sigmoid(H2F(iog) + H2F(hog) + H2F(b1o) + H2F(b2o)); f_cy = (fg * H2F(cx)) + (ig * cg); f_hy = og * ::tanh(f_cy); *hy = F2H(f_hy); *cy = F2H(f_cy); //SAVE FOR BACKWARDS //Also need cy and cx but can be saved easily in python *wig = F2H(ig); *wfg = F2H(fg); *wcg = F2H(cg); *wog = 
F2H(og); } } template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void lstm_cell_backward( TensorInfo<scalar_t, index_type> storage, TensorInfo<scalar_t, index_type> gradInGates, TensorInfo<scalar_t, index_type> _cx, TensorInfo<scalar_t, index_type> _cy, TensorInfo<scalar_t, index_type> gradoutput, TensorInfo<scalar_t, index_type> gradoutputcell, TensorInfo<scalar_t, index_type> gradInputCx, index_type hsz, index_type totalElements) { bool has_gradoutput = gradoutput.data != nullptr; bool has_gradoutputcell = gradoutputcell.data != nullptr; for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz; scalar_t ig = DEVICE_LINEAR_GET(storage, offset+0*hsz); scalar_t fg = DEVICE_LINEAR_GET(storage, offset+1*hsz); scalar_t cg = DEVICE_LINEAR_GET(storage, offset+2*hsz); scalar_t og = DEVICE_LINEAR_GET(storage, offset+3*hsz); scalar_t* ih = &DEVICE_LINEAR_GET(gradInGates, offset+0*hsz); scalar_t* fh = &DEVICE_LINEAR_GET(gradInGates, offset+1*hsz); scalar_t* ch = &DEVICE_LINEAR_GET(gradInGates, offset+2*hsz); scalar_t* oh = &DEVICE_LINEAR_GET(gradInGates, offset+3*hsz); //will return hidden grads here scalar_t cx = DEVICE_LINEAR_GET(_cx, linearIndex); scalar_t cy = DEVICE_LINEAR_GET(_cy, linearIndex); scalar_t* gi = &DEVICE_LINEAR_GET(gradInputCx, linearIndex); accscalar_t go = has_gradoutput ? H2F(DEVICE_LINEAR_GET(gradoutput, linearIndex)) : 0.f; accscalar_t goc = has_gradoutputcell ? H2F(DEVICE_LINEAR_GET(gradoutputcell, linearIndex)) : 0.f; accscalar_t gcx = ::tanh(H2F(cy)); accscalar_t gog = go * gcx; gcx = go * H2F(og) * (1 - gcx*gcx) + goc; accscalar_t gig = gcx * H2F(cg); accscalar_t gfg = gcx * H2F(cx); accscalar_t gcg = gcx * H2F(ig); gcx = gcx * H2F(fg); gig = gig * (1-H2F(ig)) * H2F(ig); gfg = gfg * (1-H2F(fg)) * H2F(fg); gcg = gcg * (1-H2F(cg)*H2F(cg)); gog = gog * (1-H2F(og)) * H2F(og); *ih = F2H(gig); *fh = F2H(gfg); *ch = F2H(gcg); *oh = F2H(gog); *gi = F2H(gcx); } } template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void gru_cell_forward( TensorInfo<scalar_t, index_type> Input, TensorInfo<scalar_t, index_type> Hidden, TensorInfo<scalar_t, index_type> Bias1, TensorInfo<scalar_t, index_type> Bias2, TensorInfo<scalar_t, index_type> _hx, TensorInfo<scalar_t, index_type> _hy, TensorInfo<scalar_t, index_type> storage, index_type hsz, index_type totalElements) { bool has_bias = Bias1.data != nullptr; for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz; scalar_t ir = DEVICE_LINEAR_GET(Input, offset+0*hsz); scalar_t ii = DEVICE_LINEAR_GET(Input, offset+1*hsz); scalar_t in = DEVICE_LINEAR_GET(Input, offset+2*hsz); scalar_t hr = DEVICE_LINEAR_GET(Hidden,offset+0*hsz); scalar_t hi = DEVICE_LINEAR_GET(Hidden,offset+1*hsz); scalar_t hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz); scalar_t hx = DEVICE_LINEAR_GET(_hx, linearIndex); scalar_t* hy = &DEVICE_LINEAR_GET(_hy, linearIndex); scalar_t b1r, b1i, b1n, b2r, b2i, b2n; if (has_bias) { b1r = DEVICE_BIAS_GET(Bias1, linearIndex%hsz+0*hsz); b1i = DEVICE_BIAS_GET(Bias1, linearIndex%hsz+1*hsz); b1n = 
DEVICE_BIAS_GET(Bias1, linearIndex%hsz+2*hsz); b2r = DEVICE_BIAS_GET(Bias2, linearIndex%hsz+0*hsz); b2i = DEVICE_BIAS_GET(Bias2, linearIndex%hsz+1*hsz); b2n = DEVICE_BIAS_GET(Bias2, linearIndex%hsz+2*hsz); } else { #ifndef THC_REAL_IS_HALF b1r = 0.0; b1i = 0.0; b1n = 0.0; b2r = 0.0; b2i = 0.0; b2n = 0.0; #else b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0); b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0); #endif } offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz; accscalar_t rg, ig, ng; rg = sigmoid(H2F(ir) + H2F(hr) + H2F(b1r) + H2F(b2r)); ig = sigmoid(H2F(ii) + H2F(hi) + H2F(b1i) + H2F(b2i)); ng = H2F(in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) ); ng = ::tanh(ng); *hy = F2H( ng + ig * ( H2F(hx)-ng ) ); //SAVE FOR BACKWARDS DEVICE_LINEAR_GET(storage, offset+0*hsz) = F2H(rg); DEVICE_LINEAR_GET(storage, offset+1*hsz) = F2H(ig); DEVICE_LINEAR_GET(storage, offset+2*hsz) = F2H(ng); DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx; DEVICE_LINEAR_GET(storage, offset+4*hsz) = F2H(H2F(hn) + H2F(b2n)); } } template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void gru_cell_backward( TensorInfo<scalar_t, index_type> gradInInput, TensorInfo<scalar_t, index_type> gradInHidden, TensorInfo<scalar_t, index_type> gradOutput, TensorInfo<scalar_t, index_type> gradInputHx, TensorInfo<scalar_t, index_type> storage, index_type hsz, index_type totalElements) { for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz; scalar_t rg = DEVICE_LINEAR_GET(storage, offset+0*hsz); scalar_t ig = DEVICE_LINEAR_GET(storage, offset+1*hsz); scalar_t ng = DEVICE_LINEAR_GET(storage, offset+2*hsz); scalar_t hx = DEVICE_LINEAR_GET(storage, offset+3*hsz); scalar_t hn = DEVICE_LINEAR_GET(storage, offset+4*hsz); scalar_t go = DEVICE_LINEAR_GET(gradOutput, linearIndex); offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz; accscalar_t gig = H2F(go)*( H2F(hx)-H2F(ng) )*( 1-H2F(ig) )*H2F(ig); accscalar_t ghx = H2F(go)*H2F(ig); accscalar_t gin = H2F(go)*( 1-H2F(ig) )*( 1-H2F(ng)*H2F(ng) ); accscalar_t ghn = gin * H2F(rg); accscalar_t grg = gin *H2F(hn)*( 1-H2F(rg) )*H2F(rg); DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = F2H(grg); DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = F2H(gig); DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = F2H(gin); DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = F2H(grg); DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = F2H(gig); DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = F2H(ghn); DEVICE_LINEAR_GET(gradInputHx, linearIndex) = F2H(ghx); } } #undef DEVICE_LINEAR_GET #undef DEVICE_BIAS_GET #undef H2F #undef F2H } // namespace kernel template<typename scalar_t, typename index_type> void lstm_forward_impl(const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& input_bias, const Tensor& hidden_bias, const Tensor& cx, const Tensor& hy, const Tensor& cy, const Tensor& workspace) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = cx.numel(); getLaunchConfig(&block, &grid, numel); auto input_gatesI = getTensorInfo<scalar_t, index_type>(input_gates); auto hidden_gatesI = getTensorInfo<scalar_t, index_type>(hidden_gates); auto input_biasI = tryGetTensorInfo<scalar_t, index_type>(input_bias); auto hidden_biasI = tryGetTensorInfo<scalar_t, index_type>(hidden_bias); auto cxI = getTensorInfo<scalar_t, 
index_type>(cx); auto hyI = getTensorInfo<scalar_t, index_type>(hy); auto cyI = getTensorInfo<scalar_t, index_type>(cy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); index_type hidden_size = cxI.sizes[cxI.dims-1]; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (allContiguous({input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace})) { collapseDims(input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, cxI, hyI, cyI, workspaceI); hipLaunchKernelGGL(( kernel::lstm_cell_forward<scalar_t, accscalar_t, index_type, 1>) , dim3(grid), dim3(block), 0, stream, input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, cxI, hyI, cyI, workspaceI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( kernel::lstm_cell_forward<scalar_t, accscalar_t, index_type, 2>) , dim3(grid), dim3(block), 0, stream, input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, cxI, hyI, cyI, workspaceI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } } template<typename scalar_t, typename index_type> void lstm_backward_impl(const Tensor& grad_hy, const Tensor& grad_cy, const Tensor& cx, const Tensor& cy, const Tensor& workspace, const Tensor& grad_gates, const Tensor& grad_cx) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = cx.numel(); getLaunchConfig(&block, &grid, numel); auto grad_hyI = tryGetTensorInfo<scalar_t, index_type>(grad_hy); auto grad_cyI = tryGetTensorInfo<scalar_t, index_type>(grad_cy); auto cxI = getTensorInfo<scalar_t, index_type>(cx); auto cyI = getTensorInfo<scalar_t, index_type>(cy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); auto grad_gatesI = getTensorInfo<scalar_t, index_type>(grad_gates); auto grad_cxI = getTensorInfo<scalar_t, index_type>(grad_cx); index_type hidden_size = cxI.sizes[cxI.dims-1]; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (allContiguous({grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx})) { collapseDims(grad_hyI, grad_cyI, cxI, cyI, workspaceI, grad_gatesI, grad_cxI); hipLaunchKernelGGL(( kernel::lstm_cell_backward<scalar_t, accscalar_t, index_type, 1>) , dim3(grid), dim3(block), 0, stream, workspaceI, grad_gatesI, cxI, cyI, grad_hyI, grad_cyI, grad_cxI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( kernel::lstm_cell_backward<scalar_t, accscalar_t, index_type, 2>) , dim3(grid), dim3(block), 0, stream, workspaceI, grad_gatesI, cxI, cyI, grad_hyI, grad_cyI, grad_cxI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } } template<typename scalar_t, typename index_type> void gru_forward_impl(const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& input_bias, const Tensor& hidden_bias, const Tensor& hx, const Tensor& hy, const Tensor& workspace) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = hx.numel(); getLaunchConfig(&block, &grid, numel); auto input_gatesI = getTensorInfo<scalar_t, index_type>(input_gates); auto hidden_gatesI = getTensorInfo<scalar_t, index_type>(hidden_gates); auto input_biasI = tryGetTensorInfo<scalar_t, index_type>(input_bias); auto hidden_biasI = tryGetTensorInfo<scalar_t, index_type>(hidden_bias); auto hxI = getTensorInfo<scalar_t, index_type>(hx); auto hyI = getTensorInfo<scalar_t, index_type>(hy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); index_type hidden_size = hxI.sizes[hxI.dims-1]; hipStream_t stream = 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (allContiguous({input_gates, hidden_gates, input_bias, hidden_bias, hx, hy, workspace})) { collapseDims(input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, hxI, hyI, workspaceI); hipLaunchKernelGGL(( kernel::gru_cell_forward<scalar_t, accscalar_t, index_type, 1>) , dim3(grid), dim3(block), 0, stream, input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, hxI, hyI, workspaceI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( kernel::gru_cell_forward<scalar_t, accscalar_t, index_type, 2>) , dim3(grid), dim3(block), 0, stream, input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, hxI, hyI, workspaceI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } } template<typename scalar_t, typename index_type> void gru_backward_impl(const Tensor& grad_hy, const Tensor& workspace, const Tensor& grad_input_gates, const Tensor& grad_hidden_gates, const Tensor& grad_hx) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = grad_hy.numel(); getLaunchConfig(&block, &grid, numel); auto grad_hyI = getTensorInfo<scalar_t, index_type>(grad_hy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); auto grad_input_gatesI = getTensorInfo<scalar_t, index_type>(grad_input_gates); auto grad_hidden_gatesI = getTensorInfo<scalar_t, index_type>(grad_hidden_gates); auto grad_hxI = getTensorInfo<scalar_t, index_type>(grad_hx); index_type hidden_size = grad_hyI.sizes[grad_hyI.dims-1]; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (allContiguous({grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx})) { collapseDims(grad_hyI, workspaceI, grad_input_gatesI, grad_hidden_gatesI, grad_hxI); hipLaunchKernelGGL(( kernel::gru_cell_backward<scalar_t, accscalar_t, index_type, 1>) , dim3(grid), dim3(block), 0, stream, grad_input_gatesI, grad_hidden_gatesI, grad_hyI, grad_hxI, workspaceI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( kernel::gru_cell_backward<scalar_t, accscalar_t, index_type, 2>) , dim3(grid), dim3(block), 0, stream, grad_input_gatesI, grad_hidden_gatesI, grad_hyI, grad_hxI, workspaceI, hidden_size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } // anonymous namespace // Note [64-bit index math check elision] // It's enough to perform the check for 64-bit math on the largest tensor only. // If 32-bit is enough for it, it will suffice for all other tensors too, and we // can save some work using this trick. 
std::tuple<Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_cuda( const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& cx, const c10::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; const Tensor& hidden_bias = c10::value_or_else(hidden_bias_opt, [] {return Tensor();}); checkSizes("_thnn_fused_lstm_cell_cuda", {input_gates, "input_gates", 1}, {hidden_gates, "hidden_gates", 2}, {input_bias, "input_bias", 3}, {hidden_bias, "hidden_bias", 4}, /*factor=*/4, {cx, "prev_hidden", 5}); auto workspace = at::empty_like(input_gates, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto hy = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto cy = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_gates.scalar_type(), "_thnn_fused_lstm_cell_cuda", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] lstm_forward_impl<scalar_t, int32_t>(input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace); } else { lstm_forward_impl<scalar_t, int64_t>(input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace); } }); return std::make_tuple(hy, cy, workspace); } void checkLSTMBackwardSizes(const TensorArg& grad_hy, const TensorArg& grad_cy, const TensorArg& cx, const TensorArg& cy, const TensorArg& workspace) { CheckedFrom c = "fused_lstm_cell_backward"; const TensorArg& defined_grad = grad_hy->defined() ? grad_hy : grad_cy; checkDim(c, defined_grad, 2); auto exp_size = defined_grad->sizes(); if (grad_hy->defined()) { checkSize(c, grad_hy, exp_size); } if (grad_cy->defined()) { checkSize(c, grad_cy, exp_size); } checkSize(c, cx, exp_size); checkSize(c, cy, exp_size); checkDim(c, workspace, 2); checkNumel(c, workspace, exp_size[0] * exp_size[1] * 4); } std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backward_cuda( const c10::optional<Tensor>& grad_hy_opt, const c10::optional<Tensor>& grad_cy_opt, const Tensor& cx, const Tensor& cy, const Tensor& workspace, bool has_bias) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> grad_hy_maybe_owned = at::borrow_from_optional_tensor(grad_hy_opt); const Tensor& grad_hy = *grad_hy_maybe_owned; const Tensor& grad_cy = c10::value_or_else(grad_cy_opt, [] {return Tensor();}); if (!grad_hy.defined() && !grad_cy.defined()) { return std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor>(); } checkLSTMBackwardSizes({grad_hy, "grad_hy", 1}, {grad_cy, "grad_cy", 2}, {cx, "cx", 3}, {cy, "cy", 4}, {workspace, "workspace", 5}); auto grad_gates = at::empty_like(workspace, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_cx = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(workspace.scalar_type(), "_thnn_fused_lstm_cell_cuda_backward", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] lstm_backward_impl<scalar_t, int32_t>(grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx); } else { lstm_backward_impl<scalar_t, int64_t>(grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx); } }); auto grad_bias = has_bias ? 
grad_gates.sum(0, /*keepdim=*/false) : at::Tensor{}; return std::make_tuple(grad_gates, grad_gates, grad_cx, grad_bias, grad_bias); } static constexpr int64_t GRU_WORKSPACE_MULTIPLIER = 5; std::tuple<Tensor, Tensor> _thnn_fused_gru_cell_cuda( const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& hx, const c10::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; const Tensor& hidden_bias = c10::value_or_else(hidden_bias_opt, [] {return Tensor();}); checkSizes("_thnn_fused_gru_cell_cuda", {input_gates, "input_gates", 1}, {hidden_gates, "hidden_gates", 2}, {input_bias, "input_bias", 3}, {hidden_bias, "hidden_bias", 4}, /*factor=*/3, {hx, "prev_hidden", 5}); auto workspace = at::empty({hx.size(0), hx.size(1) * GRU_WORKSPACE_MULTIPLIER}, hx.options()); auto hy = at::empty_like(hx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_gates.scalar_type(), "_thnn_fused_gru_cell_cuda", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] gru_forward_impl<scalar_t, int32_t>(input_gates, hidden_gates, input_bias, hidden_bias, hx, hy, workspace); } else { gru_forward_impl<scalar_t, int64_t>(input_gates, hidden_gates, input_bias, hidden_bias, hx, hy, workspace); } }); return std::make_tuple(hy, workspace); } void checkGRUBackwardSizes(const TensorArg& grad_hy, const TensorArg& workspace) { CheckedFrom c = "fused_gru_cell_backward"; checkDim(c, grad_hy, 2); checkSize(c, workspace, {grad_hy->size(0), grad_hy->size(1) * GRU_WORKSPACE_MULTIPLIER}); } std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_gru_cell_backward_cuda( const Tensor& grad_hy, const Tensor& workspace, bool has_bias) { checkGRUBackwardSizes({grad_hy, "grad_hy", 1}, {workspace, "workspace", 2}); int64_t hidden_size = workspace.size(1) / GRU_WORKSPACE_MULTIPLIER; auto grad_input_gates = at::empty({workspace.size(0), hidden_size * 3}, workspace.options()); auto grad_hidden_gates = at::empty({workspace.size(0), hidden_size * 3}, workspace.options()); auto grad_hx = at::empty_like(grad_hy, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_hy.scalar_type(), "_thnn_fused_gru_cell_cuda_backward", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] gru_backward_impl<scalar_t, int32_t>(grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx); } else { gru_backward_impl<scalar_t, int64_t>(grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx); } }); at::Tensor grad_input_bias, grad_hidden_bias; if (has_bias) { grad_input_bias = grad_input_gates.sum(0, /*keepdim=*/false); grad_hidden_bias = grad_hidden_gates.sum(0, /*keepdim=*/false); } return std::make_tuple(grad_input_gates, grad_hidden_gates, grad_hx, grad_input_bias, grad_hidden_bias); } }} // namespace at::native
5f84ace8d846247d753f96425c249e3b377f7ebe.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { using at::cuda::detail::TensorInfo; using at::cuda::detail::getTensorInfo; using at::cuda::detail::IndexToOffset; using at::cuda::detail::canUse32BitIndexMath; // Factor will be 3 for GRU and 4 for LSTM void checkSizes(CheckedFrom c, const TensorArg& input_gates, const TensorArg& hidden_gates, const TensorArg& input_bias, const TensorArg& hidden_bias, int64_t factor, const TensorArg& prev_hidden) { checkDim(c, input_gates, 2); checkSameSize(c, input_gates, hidden_gates); int64_t gates_size = input_gates->size(1); if (input_bias->defined()) { checkDim(c, input_bias, 1); checkNumel(c, input_bias, gates_size); checkSameSize(c, input_bias, hidden_bias); } checkDim(c, prev_hidden, 2); checkNumel(c, prev_hidden, input_gates->size(0) * gates_size / factor); checkAllSameGPU(c, {input_gates, hidden_gates, input_bias, hidden_bias, prev_hidden}); } bool allContiguous(at::TensorList tensors) { return std::all_of(tensors.begin(), tensors.end(), [](const at::Tensor& t) { return !t.defined() || t.is_contiguous(); }); } void getLaunchConfig(dim3* block, dim3* grid, int64_t numel) { int curDevice = -1; cudaGetDevice(&curDevice); *block = cuda::getApplyBlock(); TORCH_INTERNAL_ASSERT(cuda::getApplyGrid(numel, *grid, curDevice), "Could not get grid size for pointwise apply."); } template<typename T, typename T2> TensorInfo<T, T2> tryGetTensorInfo(const at::Tensor& t) { return t.defined() ? getTensorInfo<T, T2>(t) : TensorInfo<T, T2>{}; } void collapseDims() {}; template<typename T, typename T2, typename... Args> void collapseDims(TensorInfo<T, T2>& info, Args&... 
infos) { info.collapseDims(); collapseDims(infos...); } #define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \ D_TENSOR.data[IndexToOffset<scalar_t, index_type, indexing_kind>::get(INDEX, D_TENSOR)] // Biases are always 1D #define DEVICE_BIAS_GET(D_TENSOR, INDEX) \ D_TENSOR.data[IndexToOffset<scalar_t, index_type, 1>::get(INDEX, D_TENSOR)] #define H2F(input) static_cast<accscalar_t>(input) #define F2H(input) static_cast<scalar_t>(input) template<typename T> __device__ __forceinline__ T sigmoid(T in) { T one = static_cast<T>(1.0); return one / (one + ::exp(-in)); } namespace kernel { template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void lstm_cell_forward( TensorInfo<scalar_t, index_type> input, TensorInfo<scalar_t, index_type> hidden, TensorInfo<scalar_t, index_type> bias1, TensorInfo<scalar_t, index_type> bias2, TensorInfo<scalar_t, index_type> _cx, TensorInfo<scalar_t, index_type> _hy, TensorInfo<scalar_t, index_type> _cy, TensorInfo<scalar_t, index_type> workspace, index_type hsz, index_type totalElements) { bool has_bias = bias1.data != nullptr; for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz; scalar_t iig = DEVICE_LINEAR_GET(input, offset+0*hsz); scalar_t ifg = DEVICE_LINEAR_GET(input, offset+1*hsz); scalar_t icg = DEVICE_LINEAR_GET(input, offset+2*hsz); scalar_t iog = DEVICE_LINEAR_GET(input, offset+3*hsz); scalar_t hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz); scalar_t hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz); scalar_t hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz); scalar_t hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz); scalar_t* wig = &DEVICE_LINEAR_GET(workspace, offset+0*hsz); scalar_t* wfg = &DEVICE_LINEAR_GET(workspace, offset+1*hsz); scalar_t* wcg = &DEVICE_LINEAR_GET(workspace, offset+2*hsz); scalar_t* wog = &DEVICE_LINEAR_GET(workspace, offset+3*hsz); scalar_t cx = DEVICE_LINEAR_GET(_cx, linearIndex); scalar_t* hy = &DEVICE_LINEAR_GET(_hy, linearIndex); scalar_t* cy = &DEVICE_LINEAR_GET(_cy, linearIndex); scalar_t b1i, b1f, b1c, b1o; scalar_t b2i, b2f, b2c, b2o; if (has_bias) { b1i = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 0 * hsz); b1f = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 1 * hsz); b1c = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 2 * hsz); b1o = DEVICE_BIAS_GET(bias1, linearIndex % hsz + 3 * hsz); b2i = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 0 * hsz); b2f = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 1 * hsz); b2c = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 2 * hsz); b2o = DEVICE_BIAS_GET(bias2, linearIndex % hsz + 3 * hsz); } else { #ifndef THC_REAL_IS_HALF b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0; b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0; #else b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0); b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0); #endif } accscalar_t ig, fg, cg, og; accscalar_t f_hy, f_cy; ig = sigmoid(H2F(iig) + H2F(hig) + H2F(b1i) + H2F(b2i)); fg = sigmoid(H2F(ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f)); cg = ::tanh(H2F(icg) + H2F(hcg) + H2F(b1c) + H2F(b2c)); og = sigmoid(H2F(iog) + H2F(hog) + H2F(b1o) + H2F(b2o)); f_cy = (fg * H2F(cx)) + (ig * cg); f_hy = og * ::tanh(f_cy); *hy = F2H(f_hy); *cy = F2H(f_cy); //SAVE FOR BACKWARDS //Also need cy and cx but can be saved easily in python *wig = F2H(ig); *wfg = F2H(fg); *wcg = F2H(cg); *wog = 
F2H(og); } } template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void lstm_cell_backward( TensorInfo<scalar_t, index_type> storage, TensorInfo<scalar_t, index_type> gradInGates, TensorInfo<scalar_t, index_type> _cx, TensorInfo<scalar_t, index_type> _cy, TensorInfo<scalar_t, index_type> gradoutput, TensorInfo<scalar_t, index_type> gradoutputcell, TensorInfo<scalar_t, index_type> gradInputCx, index_type hsz, index_type totalElements) { bool has_gradoutput = gradoutput.data != nullptr; bool has_gradoutputcell = gradoutputcell.data != nullptr; for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz; scalar_t ig = DEVICE_LINEAR_GET(storage, offset+0*hsz); scalar_t fg = DEVICE_LINEAR_GET(storage, offset+1*hsz); scalar_t cg = DEVICE_LINEAR_GET(storage, offset+2*hsz); scalar_t og = DEVICE_LINEAR_GET(storage, offset+3*hsz); scalar_t* ih = &DEVICE_LINEAR_GET(gradInGates, offset+0*hsz); scalar_t* fh = &DEVICE_LINEAR_GET(gradInGates, offset+1*hsz); scalar_t* ch = &DEVICE_LINEAR_GET(gradInGates, offset+2*hsz); scalar_t* oh = &DEVICE_LINEAR_GET(gradInGates, offset+3*hsz); //will return hidden grads here scalar_t cx = DEVICE_LINEAR_GET(_cx, linearIndex); scalar_t cy = DEVICE_LINEAR_GET(_cy, linearIndex); scalar_t* gi = &DEVICE_LINEAR_GET(gradInputCx, linearIndex); accscalar_t go = has_gradoutput ? H2F(DEVICE_LINEAR_GET(gradoutput, linearIndex)) : 0.f; accscalar_t goc = has_gradoutputcell ? H2F(DEVICE_LINEAR_GET(gradoutputcell, linearIndex)) : 0.f; accscalar_t gcx = ::tanh(H2F(cy)); accscalar_t gog = go * gcx; gcx = go * H2F(og) * (1 - gcx*gcx) + goc; accscalar_t gig = gcx * H2F(cg); accscalar_t gfg = gcx * H2F(cx); accscalar_t gcg = gcx * H2F(ig); gcx = gcx * H2F(fg); gig = gig * (1-H2F(ig)) * H2F(ig); gfg = gfg * (1-H2F(fg)) * H2F(fg); gcg = gcg * (1-H2F(cg)*H2F(cg)); gog = gog * (1-H2F(og)) * H2F(og); *ih = F2H(gig); *fh = F2H(gfg); *ch = F2H(gcg); *oh = F2H(gog); *gi = F2H(gcx); } } template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void gru_cell_forward( TensorInfo<scalar_t, index_type> Input, TensorInfo<scalar_t, index_type> Hidden, TensorInfo<scalar_t, index_type> Bias1, TensorInfo<scalar_t, index_type> Bias2, TensorInfo<scalar_t, index_type> _hx, TensorInfo<scalar_t, index_type> _hy, TensorInfo<scalar_t, index_type> storage, index_type hsz, index_type totalElements) { bool has_bias = Bias1.data != nullptr; for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz; scalar_t ir = DEVICE_LINEAR_GET(Input, offset+0*hsz); scalar_t ii = DEVICE_LINEAR_GET(Input, offset+1*hsz); scalar_t in = DEVICE_LINEAR_GET(Input, offset+2*hsz); scalar_t hr = DEVICE_LINEAR_GET(Hidden,offset+0*hsz); scalar_t hi = DEVICE_LINEAR_GET(Hidden,offset+1*hsz); scalar_t hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz); scalar_t hx = DEVICE_LINEAR_GET(_hx, linearIndex); scalar_t* hy = &DEVICE_LINEAR_GET(_hy, linearIndex); scalar_t b1r, b1i, b1n, b2r, b2i, b2n; if (has_bias) { b1r = DEVICE_BIAS_GET(Bias1, linearIndex%hsz+0*hsz); b1i = DEVICE_BIAS_GET(Bias1, linearIndex%hsz+1*hsz); b1n = 
DEVICE_BIAS_GET(Bias1, linearIndex%hsz+2*hsz); b2r = DEVICE_BIAS_GET(Bias2, linearIndex%hsz+0*hsz); b2i = DEVICE_BIAS_GET(Bias2, linearIndex%hsz+1*hsz); b2n = DEVICE_BIAS_GET(Bias2, linearIndex%hsz+2*hsz); } else { #ifndef THC_REAL_IS_HALF b1r = 0.0; b1i = 0.0; b1n = 0.0; b2r = 0.0; b2i = 0.0; b2n = 0.0; #else b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0); b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0); #endif } offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz; accscalar_t rg, ig, ng; rg = sigmoid(H2F(ir) + H2F(hr) + H2F(b1r) + H2F(b2r)); ig = sigmoid(H2F(ii) + H2F(hi) + H2F(b1i) + H2F(b2i)); ng = H2F(in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) ); ng = ::tanh(ng); *hy = F2H( ng + ig * ( H2F(hx)-ng ) ); //SAVE FOR BACKWARDS DEVICE_LINEAR_GET(storage, offset+0*hsz) = F2H(rg); DEVICE_LINEAR_GET(storage, offset+1*hsz) = F2H(ig); DEVICE_LINEAR_GET(storage, offset+2*hsz) = F2H(ng); DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx; DEVICE_LINEAR_GET(storage, offset+4*hsz) = F2H(H2F(hn) + H2F(b2n)); } } template <typename scalar_t, typename accscalar_t, typename index_type, int indexing_kind> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(512, 4) #endif __global__ void gru_cell_backward( TensorInfo<scalar_t, index_type> gradInInput, TensorInfo<scalar_t, index_type> gradInHidden, TensorInfo<scalar_t, index_type> gradOutput, TensorInfo<scalar_t, index_type> gradInputHx, TensorInfo<scalar_t, index_type> storage, index_type hsz, index_type totalElements) { for (index_type linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) { index_type offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz; scalar_t rg = DEVICE_LINEAR_GET(storage, offset+0*hsz); scalar_t ig = DEVICE_LINEAR_GET(storage, offset+1*hsz); scalar_t ng = DEVICE_LINEAR_GET(storage, offset+2*hsz); scalar_t hx = DEVICE_LINEAR_GET(storage, offset+3*hsz); scalar_t hn = DEVICE_LINEAR_GET(storage, offset+4*hsz); scalar_t go = DEVICE_LINEAR_GET(gradOutput, linearIndex); offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz; accscalar_t gig = H2F(go)*( H2F(hx)-H2F(ng) )*( 1-H2F(ig) )*H2F(ig); accscalar_t ghx = H2F(go)*H2F(ig); accscalar_t gin = H2F(go)*( 1-H2F(ig) )*( 1-H2F(ng)*H2F(ng) ); accscalar_t ghn = gin * H2F(rg); accscalar_t grg = gin *H2F(hn)*( 1-H2F(rg) )*H2F(rg); DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = F2H(grg); DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = F2H(gig); DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = F2H(gin); DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = F2H(grg); DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = F2H(gig); DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = F2H(ghn); DEVICE_LINEAR_GET(gradInputHx, linearIndex) = F2H(ghx); } } #undef DEVICE_LINEAR_GET #undef DEVICE_BIAS_GET #undef H2F #undef F2H } // namespace kernel template<typename scalar_t, typename index_type> void lstm_forward_impl(const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& input_bias, const Tensor& hidden_bias, const Tensor& cx, const Tensor& hy, const Tensor& cy, const Tensor& workspace) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = cx.numel(); getLaunchConfig(&block, &grid, numel); auto input_gatesI = getTensorInfo<scalar_t, index_type>(input_gates); auto hidden_gatesI = getTensorInfo<scalar_t, index_type>(hidden_gates); auto input_biasI = tryGetTensorInfo<scalar_t, index_type>(input_bias); auto hidden_biasI = tryGetTensorInfo<scalar_t, index_type>(hidden_bias); auto cxI = getTensorInfo<scalar_t, 
index_type>(cx); auto hyI = getTensorInfo<scalar_t, index_type>(hy); auto cyI = getTensorInfo<scalar_t, index_type>(cy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); index_type hidden_size = cxI.sizes[cxI.dims-1]; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (allContiguous({input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace})) { collapseDims(input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, cxI, hyI, cyI, workspaceI); kernel::lstm_cell_forward<scalar_t, accscalar_t, index_type, 1> <<<grid, block, 0, stream>>> (input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, cxI, hyI, cyI, workspaceI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { kernel::lstm_cell_forward<scalar_t, accscalar_t, index_type, 2> <<<grid, block, 0, stream>>> (input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, cxI, hyI, cyI, workspaceI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } template<typename scalar_t, typename index_type> void lstm_backward_impl(const Tensor& grad_hy, const Tensor& grad_cy, const Tensor& cx, const Tensor& cy, const Tensor& workspace, const Tensor& grad_gates, const Tensor& grad_cx) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = cx.numel(); getLaunchConfig(&block, &grid, numel); auto grad_hyI = tryGetTensorInfo<scalar_t, index_type>(grad_hy); auto grad_cyI = tryGetTensorInfo<scalar_t, index_type>(grad_cy); auto cxI = getTensorInfo<scalar_t, index_type>(cx); auto cyI = getTensorInfo<scalar_t, index_type>(cy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); auto grad_gatesI = getTensorInfo<scalar_t, index_type>(grad_gates); auto grad_cxI = getTensorInfo<scalar_t, index_type>(grad_cx); index_type hidden_size = cxI.sizes[cxI.dims-1]; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (allContiguous({grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx})) { collapseDims(grad_hyI, grad_cyI, cxI, cyI, workspaceI, grad_gatesI, grad_cxI); kernel::lstm_cell_backward<scalar_t, accscalar_t, index_type, 1> <<<grid, block, 0, stream>>> (workspaceI, grad_gatesI, cxI, cyI, grad_hyI, grad_cyI, grad_cxI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { kernel::lstm_cell_backward<scalar_t, accscalar_t, index_type, 2> <<<grid, block, 0, stream>>> (workspaceI, grad_gatesI, cxI, cyI, grad_hyI, grad_cyI, grad_cxI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } template<typename scalar_t, typename index_type> void gru_forward_impl(const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& input_bias, const Tensor& hidden_bias, const Tensor& hx, const Tensor& hy, const Tensor& workspace) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = hx.numel(); getLaunchConfig(&block, &grid, numel); auto input_gatesI = getTensorInfo<scalar_t, index_type>(input_gates); auto hidden_gatesI = getTensorInfo<scalar_t, index_type>(hidden_gates); auto input_biasI = tryGetTensorInfo<scalar_t, index_type>(input_bias); auto hidden_biasI = tryGetTensorInfo<scalar_t, index_type>(hidden_bias); auto hxI = getTensorInfo<scalar_t, index_type>(hx); auto hyI = getTensorInfo<scalar_t, index_type>(hy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); index_type hidden_size = hxI.sizes[hxI.dims-1]; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (allContiguous({input_gates, hidden_gates, input_bias, hidden_bias, hx, hy, workspace})) { collapseDims(input_gatesI, hidden_gatesI, 
input_biasI, hidden_biasI, hxI, hyI, workspaceI); kernel::gru_cell_forward<scalar_t, accscalar_t, index_type, 1> <<<grid, block, 0, stream>>> (input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, hxI, hyI, workspaceI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { kernel::gru_cell_forward<scalar_t, accscalar_t, index_type, 2> <<<grid, block, 0, stream>>> (input_gatesI, hidden_gatesI, input_biasI, hidden_biasI, hxI, hyI, workspaceI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } template<typename scalar_t, typename index_type> void gru_backward_impl(const Tensor& grad_hy, const Tensor& workspace, const Tensor& grad_input_gates, const Tensor& grad_hidden_gates, const Tensor& grad_hx) { using accscalar_t = acc_type<scalar_t, /*is_cuda=*/true>; dim3 block, grid; int64_t numel = grad_hy.numel(); getLaunchConfig(&block, &grid, numel); auto grad_hyI = getTensorInfo<scalar_t, index_type>(grad_hy); auto workspaceI = getTensorInfo<scalar_t, index_type>(workspace); auto grad_input_gatesI = getTensorInfo<scalar_t, index_type>(grad_input_gates); auto grad_hidden_gatesI = getTensorInfo<scalar_t, index_type>(grad_hidden_gates); auto grad_hxI = getTensorInfo<scalar_t, index_type>(grad_hx); index_type hidden_size = grad_hyI.sizes[grad_hyI.dims-1]; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (allContiguous({grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx})) { collapseDims(grad_hyI, workspaceI, grad_input_gatesI, grad_hidden_gatesI, grad_hxI); kernel::gru_cell_backward<scalar_t, accscalar_t, index_type, 1> <<<grid, block, 0, stream>>> (grad_input_gatesI, grad_hidden_gatesI, grad_hyI, grad_hxI, workspaceI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { kernel::gru_cell_backward<scalar_t, accscalar_t, index_type, 2> <<<grid, block, 0, stream>>> (grad_input_gatesI, grad_hidden_gatesI, grad_hyI, grad_hxI, workspaceI, hidden_size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } // anonymous namespace // Note [64-bit index math check elision] // It's enough to perform the check for 64-bit math on the largest tensor only. // If 32-bit is enough for it, it will suffice for all other tensors too, and we // can save some work using this trick. 
std::tuple<Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_cuda( const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& cx, const c10::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; const Tensor& hidden_bias = c10::value_or_else(hidden_bias_opt, [] {return Tensor();}); checkSizes("_thnn_fused_lstm_cell_cuda", {input_gates, "input_gates", 1}, {hidden_gates, "hidden_gates", 2}, {input_bias, "input_bias", 3}, {hidden_bias, "hidden_bias", 4}, /*factor=*/4, {cx, "prev_hidden", 5}); auto workspace = at::empty_like(input_gates, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto hy = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto cy = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_gates.scalar_type(), "_thnn_fused_lstm_cell_cuda", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] lstm_forward_impl<scalar_t, int32_t>(input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace); } else { lstm_forward_impl<scalar_t, int64_t>(input_gates, hidden_gates, input_bias, hidden_bias, cx, hy, cy, workspace); } }); return std::make_tuple(hy, cy, workspace); } void checkLSTMBackwardSizes(const TensorArg& grad_hy, const TensorArg& grad_cy, const TensorArg& cx, const TensorArg& cy, const TensorArg& workspace) { CheckedFrom c = "fused_lstm_cell_backward"; const TensorArg& defined_grad = grad_hy->defined() ? grad_hy : grad_cy; checkDim(c, defined_grad, 2); auto exp_size = defined_grad->sizes(); if (grad_hy->defined()) { checkSize(c, grad_hy, exp_size); } if (grad_cy->defined()) { checkSize(c, grad_cy, exp_size); } checkSize(c, cx, exp_size); checkSize(c, cy, exp_size); checkDim(c, workspace, 2); checkNumel(c, workspace, exp_size[0] * exp_size[1] * 4); } std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backward_cuda( const c10::optional<Tensor>& grad_hy_opt, const c10::optional<Tensor>& grad_cy_opt, const Tensor& cx, const Tensor& cy, const Tensor& workspace, bool has_bias) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> grad_hy_maybe_owned = at::borrow_from_optional_tensor(grad_hy_opt); const Tensor& grad_hy = *grad_hy_maybe_owned; const Tensor& grad_cy = c10::value_or_else(grad_cy_opt, [] {return Tensor();}); if (!grad_hy.defined() && !grad_cy.defined()) { return std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor>(); } checkLSTMBackwardSizes({grad_hy, "grad_hy", 1}, {grad_cy, "grad_cy", 2}, {cx, "cx", 3}, {cy, "cy", 4}, {workspace, "workspace", 5}); auto grad_gates = at::empty_like(workspace, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto grad_cx = at::empty_like(cx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(workspace.scalar_type(), "_thnn_fused_lstm_cell_cuda_backward", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] lstm_backward_impl<scalar_t, int32_t>(grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx); } else { lstm_backward_impl<scalar_t, int64_t>(grad_hy, grad_cy, cx, cy, workspace, grad_gates, grad_cx); } }); auto grad_bias = has_bias ? 
grad_gates.sum(0, /*keepdim=*/false) : at::Tensor{}; return std::make_tuple(grad_gates, grad_gates, grad_cx, grad_bias, grad_bias); } static constexpr int64_t GRU_WORKSPACE_MULTIPLIER = 5; std::tuple<Tensor, Tensor> _thnn_fused_gru_cell_cuda( const Tensor& input_gates, const Tensor& hidden_gates, const Tensor& hx, const c10::optional<Tensor>& input_bias_opt, const c10::optional<Tensor>& hidden_bias_opt) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> input_bias_maybe_owned = at::borrow_from_optional_tensor(input_bias_opt); const Tensor& input_bias = *input_bias_maybe_owned; const Tensor& hidden_bias = c10::value_or_else(hidden_bias_opt, [] {return Tensor();}); checkSizes("_thnn_fused_gru_cell_cuda", {input_gates, "input_gates", 1}, {hidden_gates, "hidden_gates", 2}, {input_bias, "input_bias", 3}, {hidden_bias, "hidden_bias", 4}, /*factor=*/3, {hx, "prev_hidden", 5}); auto workspace = at::empty({hx.size(0), hx.size(1) * GRU_WORKSPACE_MULTIPLIER}, hx.options()); auto hy = at::empty_like(hx, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_gates.scalar_type(), "_thnn_fused_gru_cell_cuda", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] gru_forward_impl<scalar_t, int32_t>(input_gates, hidden_gates, input_bias, hidden_bias, hx, hy, workspace); } else { gru_forward_impl<scalar_t, int64_t>(input_gates, hidden_gates, input_bias, hidden_bias, hx, hy, workspace); } }); return std::make_tuple(hy, workspace); } void checkGRUBackwardSizes(const TensorArg& grad_hy, const TensorArg& workspace) { CheckedFrom c = "fused_gru_cell_backward"; checkDim(c, grad_hy, 2); checkSize(c, workspace, {grad_hy->size(0), grad_hy->size(1) * GRU_WORKSPACE_MULTIPLIER}); } std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_gru_cell_backward_cuda( const Tensor& grad_hy, const Tensor& workspace, bool has_bias) { checkGRUBackwardSizes({grad_hy, "grad_hy", 1}, {workspace, "workspace", 2}); int64_t hidden_size = workspace.size(1) / GRU_WORKSPACE_MULTIPLIER; auto grad_input_gates = at::empty({workspace.size(0), hidden_size * 3}, workspace.options()); auto grad_hidden_gates = at::empty({workspace.size(0), hidden_size * 3}, workspace.options()); auto grad_hx = at::empty_like(grad_hy, LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_hy.scalar_type(), "_thnn_fused_gru_cell_cuda_backward", [&] { if (canUse32BitIndexMath(workspace)) { // See Note [64-bit index math check elision] gru_backward_impl<scalar_t, int32_t>(grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx); } else { gru_backward_impl<scalar_t, int64_t>(grad_hy, workspace, grad_input_gates, grad_hidden_gates, grad_hx); } }); at::Tensor grad_input_bias, grad_hidden_bias; if (has_bias) { grad_input_bias = grad_input_gates.sum(0, /*keepdim=*/false); grad_hidden_bias = grad_hidden_gates.sum(0, /*keepdim=*/false); } return std::make_tuple(grad_input_gates, grad_hidden_gates, grad_hx, grad_input_bias, grad_hidden_bias); } }} // namespace at::native
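For reference, the per-element arithmetic that gru_cell_forward stores into hy can be restated as a scalar host-side sketch. This is illustrative only and not part of the file above; the function name and plain float types are assumptions, but the gate expressions mirror the kernel's rg/ig/ng/hy computation.

#include <cmath>

// Scalar sketch of one GRU hidden unit. ir/ii/in are the input-side
// pre-activations, hr/hi/hn the hidden-side pre-activations, b1*/b2* the two
// bias sets, and hx the previous hidden state (names follow the kernel).
float gru_unit(float ir, float ii, float in, float hr, float hi, float hn,
               float b1r, float b1i, float b1n, float b2r, float b2i, float b2n,
               float hx)
{
    auto sigmoid = [](float x) { return 1.0f / (1.0f + std::exp(-x)); };
    float rg = sigmoid(ir + hr + b1r + b2r);           // reset gate
    float ig = sigmoid(ii + hi + b1i + b2i);           // update gate
    float ng = std::tanh(in + b1n + rg * (hn + b2n));  // candidate state
    return ng + ig * (hx - ng);                        // new hidden state hy
}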
0ac2c8ccae96680bb39c0ac6fdcadcf59c1ac77c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <cmath>
#include <cstring>
#include <algorithm>

using namespace std;

__global__ void initMatrix(float *A, int n = 512)
{
    uint i = (blockIdx.y * blockDim.y) + threadIdx.y;
    uint j = (blockIdx.x * blockDim.x) + threadIdx.x;
    // printf("j: %d, i: %d\n", i, j);
    if(i == 0 || i == n-1 || j == 0 || j == n-1)
        A[i*n+j] = cosf((4.0 * M_PI * i) / (n - 1.0)) \
                 * cosf((4.0 * M_PI * j) / (n - 1.0));
    else
        A[i*n+j] = 0;
}

// Jacobi relaxation: each interior point is repeatedly replaced by the average
// of its four neighbours; the boundary values written by initMatrix stay fixed.
__global__ void jacobiIteration(float *A, int n = 512, int iterations = 36)
{
    float A_nghbrs[4];
    uint i = (blockIdx.y * blockDim.y) + threadIdx.y;
    uint j = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Guard so boundary threads neither read out of range nor overwrite the
    // fixed boundary; they still reach both __syncthreads() calls.
    bool interior = (i > 0 && i < n-1 && j > 0 && j < n-1);
    for(int k=0; k<iterations; k++)
    {
        __syncthreads();
        if (interior)
        {
            A_nghbrs[0] = A[(i-1)*n+j];
            A_nghbrs[1] = A[(i+1)*n+j];
            A_nghbrs[2] = A[i*n+j-1];
            A_nghbrs[3] = A[i*n+j+1];
        }
        __syncthreads();
        if (interior)
            A[i*n+j] = (A_nghbrs[0] + A_nghbrs[1] + A_nghbrs[2] + A_nghbrs[3]) / 4.0;
    }
}

void printMatrix(float *A, int n = 512)
{
    for(int i=0; i<n; i++)
    {
        for(int j=0; j<n-1; j++)
        {
            printf("%f;", A[i*n+j]);
        }
        printf("%f\n", A[i*n+n-1]);
    }
}

int main(int argc, char *argv[])
{
    unsigned int N;
    int iterations = 36;

    if (argc > 1)
        N = atoi(argv[1]);
    else
        N = 512;
    fprintf(stderr, "Using grid size %dx%d\n", N, N);

    float *A;
    hipMallocManaged(&A, sizeof *A *N*N);

    dim3 nt(8,8);
    dim3 nb(N/nt.x, N/nt.y);

    hipLaunchKernelGGL(( initMatrix), dim3(nb), dim3(nt), 0, 0, A, N);
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        fprintf(stderr, "Error: %s\n", hipGetErrorString(err));
    hipDeviceSynchronize();
    printMatrix(A, N);

    hipLaunchKernelGGL(( jacobiIteration), dim3(nb), dim3(nt), 0, 0, A, N, iterations);
    err = hipGetLastError();
    if (err != hipSuccess)
        fprintf(stderr, "Error: %s\n", hipGetErrorString(err));
    hipDeviceSynchronize();
    printMatrix(A, N);

    fprintf(stderr, "iterations: %d\n", iterations);

    hipFree(A);
    return 0;
}
0ac2c8ccae96680bb39c0ac6fdcadcf59c1ac77c.cu
#include <stdlib.h>
#include <stdio.h>
#include <cmath>
#include <cstring>
#include <algorithm>

using namespace std;

__global__ void initMatrix(float *A, int n = 512)
{
    uint i = (blockIdx.y * blockDim.y) + threadIdx.y;
    uint j = (blockIdx.x * blockDim.x) + threadIdx.x;
    // printf("j: %d, i: %d\n", i, j);
    if(i == 0 || i == n-1 || j == 0 || j == n-1)
        A[i*n+j] = cosf((4.0 * M_PI * i) / (n - 1.0)) \
                 * cosf((4.0 * M_PI * j) / (n - 1.0));
    else
        A[i*n+j] = 0;
}

// Jacobi relaxation: each interior point is repeatedly replaced by the average
// of its four neighbours; the boundary values written by initMatrix stay fixed.
__global__ void jacobiIteration(float *A, int n = 512, int iterations = 36)
{
    float A_nghbrs[4];
    uint i = (blockIdx.y * blockDim.y) + threadIdx.y;
    uint j = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Guard so boundary threads neither read out of range nor overwrite the
    // fixed boundary; they still reach both __syncthreads() calls.
    bool interior = (i > 0 && i < n-1 && j > 0 && j < n-1);
    for(int k=0; k<iterations; k++)
    {
        __syncthreads();
        if (interior)
        {
            A_nghbrs[0] = A[(i-1)*n+j];
            A_nghbrs[1] = A[(i+1)*n+j];
            A_nghbrs[2] = A[i*n+j-1];
            A_nghbrs[3] = A[i*n+j+1];
        }
        __syncthreads();
        if (interior)
            A[i*n+j] = (A_nghbrs[0] + A_nghbrs[1] + A_nghbrs[2] + A_nghbrs[3]) / 4.0;
    }
}

void printMatrix(float *A, int n = 512)
{
    for(int i=0; i<n; i++)
    {
        for(int j=0; j<n-1; j++)
        {
            printf("%f;", A[i*n+j]);
        }
        printf("%f\n", A[i*n+n-1]);
    }
}

int main(int argc, char *argv[])
{
    unsigned int N;
    int iterations = 36;

    if (argc > 1)
        N = atoi(argv[1]);
    else
        N = 512;
    fprintf(stderr, "Using grid size %dx%d\n", N, N);

    float *A;
    cudaMallocManaged(&A, sizeof *A *N*N);

    dim3 nt(8,8);
    dim3 nb(N/nt.x, N/nt.y);

    initMatrix<<<nb, nt>>>(A, N);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "Error: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    printMatrix(A, N);

    jacobiIteration<<<nb, nt>>>(A, N, iterations);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "Error: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    printMatrix(A, N);

    fprintf(stderr, "iterations: %d\n", iterations);

    cudaFree(A);
    return 0;
}
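For comparison with the kernel above, a single sweep of the same four-point stencil on the host is sketched below. It is illustrative only: unlike the kernel, which updates A in place, it uses separate in/out buffers, which is the textbook Jacobi formulation.

#include <cstring>

// One host-side Jacobi sweep: every interior point becomes the average of its
// four neighbours; the boundary rows and columns are copied through unchanged.
void jacobiSweepHost(const float *in, float *out, int n)
{
    memcpy(out, in, sizeof(float) * n * n);   // keep the fixed boundary values
    for (int i = 1; i < n - 1; i++)
        for (int j = 1; j < n - 1; j++)
            out[i*n+j] = (in[(i-1)*n+j] + in[(i+1)*n+j]
                        + in[i*n+j-1]  + in[i*n+j+1]) / 4.0f;
}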
2a1474320929293a130f2ee28d67720a2fd7e47b.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include<stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include"tokura_blas_define.h" #include"tokura_blas_const.h" //#include"tokura_tuned_thread_parameters.h" int tokura_dgeev_batched_MWB(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount); int tokura_dgeev_batched_SWBMWB(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount); int tokura_dgeev_batched_MWB_tune(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount, float* time); int tokura_dgeev_batched_SWB_tune(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount,float* time); void tokuraCreate(tokuraInternalhandle_t** tokurahandle) { *tokurahandle = (tokuraInternalhandle_t*)malloc(sizeof(tokuraInternalhandle_t)); hipDeviceProp_t dev; hipGetDeviceProperties(&dev, 0); (*tokurahandle)->WARP_SIZE = dev.warpSize; (*tokurahandle)->sharedsize = dev.sharedMemPerBlock; } void tokuraDestroy(tokuraInternalhandle_t* tokurahandle) { free(tokurahandle); } void tokuraMemorymalloc(tokuraInternalhandle_t* tokurahandle, int n, int batchCount) { int i; tokurahandle->A_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS); tokurahandle->A_tmp_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS); tokurahandle->wr_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS); tokurahandle->wr_tmp_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS); tokurahandle->wi_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS); tokurahandle->wi_tmp_device =(double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS); tokurahandle->stream = (hipStream_t*)malloc(sizeof(hipStream_t)*NUMBER_OF_STREAMS); for (i = 0; i < NUMBER_OF_STREAMS; i++) { hipMalloc((void**)&tokurahandle->A_device[i], sizeof(double)*n*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM); hipMalloc((void**)&tokurahandle->A_tmp_device[i], sizeof(double)*n*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM); hipMalloc((void**)&tokurahandle->wr_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM); hipMalloc((void**)&tokurahandle->wi_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM); hipMalloc((void**)&tokurahandle->wr_tmp_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM); hipMalloc((void**)&tokurahandle->wi_tmp_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM); hipStreamCreate(tokurahandle->stream + i); } } void tokuraMemoryfree(tokuraInternalhandle_t* tokurahandle, int n, int batchCount) { int i; for (i = 0; i < NUMBER_OF_STREAMS; i++) { hipFree(tokurahandle->A_device[i]); hipFree(tokurahandle->A_tmp_device[i]); hipFree(tokurahandle->wr_device[i]); hipFree(tokurahandle->wi_device[i]); hipFree(tokurahandle->wr_tmp_device[i]); hipFree(tokurahandle->wi_tmp_device[i]); hipStreamDestroy(tokurahandle->stream[i]); } free(tokurahandle->A_device); free(tokurahandle->A_tmp_device); free(tokurahandle->wr_device); free(tokurahandle->wr_tmp_device); free(tokurahandle->wi_device); free(tokurahandle->wi_tmp_device); free(tokurahandle->stream); } int tokura_dgeev_batched_MWBtune(int n, double** A, double** wr, double** wi, int batchCount, float* time) { if (!(n > 0 && n <= 32)) { // return -1; } if (A == NULL) { // return -2; } if (wr == NULL) { // return -3; } if (wi == NULL) { // return -4; } tokuraInternalhandle_t* tokurahandle; int threadnum; 
tokuraCreate(&tokurahandle); tokuraMemorymalloc(tokurahandle, n, batchCount); threadnum =tokura_dgeev_batched_MWB_tune(tokurahandle, n, A, wr, wi, batchCount,time); tokuraMemoryfree(tokurahandle, n, batchCount); tokuraDestroy(tokurahandle); return threadnum;//success } int tokura_dgeev_batched_SWBtune(int n, double** A, double** wr, double** wi, int batchCount, float* time) { if (!(n > 0 && n <= 32)) { // return -1; } if (A == NULL) { // return -2; } if (wr == NULL) { // return -3; } if (wi == NULL) { // return -4; } tokuraInternalhandle_t* tokurahandle; int threadnum; tokuraCreate(&tokurahandle); tokuraMemorymalloc(tokurahandle, n, batchCount); threadnum = tokura_dgeev_batched_SWB_tune(tokurahandle, n, A, wr, wi, batchCount, time); tokuraMemoryfree(tokurahandle, n, batchCount); tokuraDestroy(tokurahandle); return threadnum;//success }
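Both tuning entry points return the tuned thread count (threadnum) on success and a negative code when the input is rejected: -1 for n outside 1..32, -2 for a NULL A, -3 for a NULL wr, and -4 for a NULL wi. A minimal caller-side check is sketched below; the wrapper name is hypothetical, and the arrays A, wr, wi are assumed to be set up as the library expects.

#include <cstdio>

// Hypothetical helper: run the MWB auto-tuner and report the outcome.
int runMWBTuner(int n, double **A, double **wr, double **wi, int batchCount)
{
    float elapsed = 0.0f;
    int rc = tokura_dgeev_batched_MWBtune(n, A, wr, wi, batchCount, &elapsed);
    if (rc < 0)
        fprintf(stderr, "tokura_dgeev_batched_MWBtune rejected the input: %d\n", rc);
    else
        fprintf(stderr, "tuned thread count: %d, reported time: %f\n", rc, elapsed);
    return rc;
}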
2a1474320929293a130f2ee28d67720a2fd7e47b.cu
#include<stdio.h>
#include<stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include"tokura_blas_define.h"
#include"tokura_blas_const.h"
//#include"tokura_tuned_thread_parameters.h"

int tokura_dgeev_batched_MWB(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount);
int tokura_dgeev_batched_SWBMWB(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount);
int tokura_dgeev_batched_MWB_tune(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount, float* time);
int tokura_dgeev_batched_SWB_tune(tokuraInternalhandle_t* tokurahandle, int n, double** A, double** wr, double** wi, int batchCount, float* time);

void tokuraCreate(tokuraInternalhandle_t** tokurahandle)
{
	*tokurahandle = (tokuraInternalhandle_t*)malloc(sizeof(tokuraInternalhandle_t));
	cudaDeviceProp dev;
	cudaGetDeviceProperties(&dev, 0);
	(*tokurahandle)->WARP_SIZE = dev.warpSize;
	(*tokurahandle)->sharedsize = dev.sharedMemPerBlock;
}

void tokuraDestroy(tokuraInternalhandle_t* tokurahandle)
{
	free(tokurahandle);
}

void tokuraMemorymalloc(tokuraInternalhandle_t* tokurahandle, int n, int batchCount)
{
	int i;
	tokurahandle->A_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS);
	tokurahandle->A_tmp_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS);
	tokurahandle->wr_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS);
	tokurahandle->wr_tmp_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS);
	tokurahandle->wi_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS);
	tokurahandle->wi_tmp_device = (double**)malloc(sizeof(double*)*NUMBER_OF_STREAMS);
	tokurahandle->stream = (cudaStream_t*)malloc(sizeof(cudaStream_t)*NUMBER_OF_STREAMS);

	for (i = 0; i < NUMBER_OF_STREAMS; i++)
	{
		cudaMalloc((void**)&tokurahandle->A_device[i], sizeof(double)*n*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM);
		cudaMalloc((void**)&tokurahandle->A_tmp_device[i], sizeof(double)*n*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM);
		cudaMalloc((void**)&tokurahandle->wr_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM);
		cudaMalloc((void**)&tokurahandle->wi_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM);
		cudaMalloc((void**)&tokurahandle->wr_tmp_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM);
		cudaMalloc((void**)&tokurahandle->wi_tmp_device[i], sizeof(double)*n*NUMBER_OF_COMPUTED_MATRICES_PER_STREAM);
		cudaStreamCreate(tokurahandle->stream + i);
	}
}

void tokuraMemoryfree(tokuraInternalhandle_t* tokurahandle, int n, int batchCount)
{
	int i;
	for (i = 0; i < NUMBER_OF_STREAMS; i++)
	{
		cudaFree(tokurahandle->A_device[i]);
		cudaFree(tokurahandle->A_tmp_device[i]);
		cudaFree(tokurahandle->wr_device[i]);
		cudaFree(tokurahandle->wi_device[i]);
		cudaFree(tokurahandle->wr_tmp_device[i]);
		cudaFree(tokurahandle->wi_tmp_device[i]);
		cudaStreamDestroy(tokurahandle->stream[i]);
	}
	free(tokurahandle->A_device);
	free(tokurahandle->A_tmp_device);
	free(tokurahandle->wr_device);
	free(tokurahandle->wr_tmp_device);
	free(tokurahandle->wi_device);
	free(tokurahandle->wi_tmp_device);
	free(tokurahandle->stream);
}

int tokura_dgeev_batched_MWBtune(int n, double** A, double** wr, double** wi, int batchCount, float* time)
{
	if (!(n > 0 && n <= 32))
	{
		// check the input matrix size
		return -1;
	}
	if (A == NULL)
	{
		// check that the input matrices are allocated
		return -2;
	}
	if (wr == NULL)
	{
		// check that the output buffer for the eigenvalue real parts is allocated
		return -3;
	}
	if (wi == NULL)
	{
		// check that the output buffer for the eigenvalue imaginary parts is allocated
		return -4;
	}

	tokuraInternalhandle_t* tokurahandle;
	int threadnum;
	tokuraCreate(&tokurahandle);
	tokuraMemorymalloc(tokurahandle, n, batchCount);

	threadnum = tokura_dgeev_batched_MWB_tune(tokurahandle, n, A, wr, wi, batchCount, time);

	tokuraMemoryfree(tokurahandle, n, batchCount);
	tokuraDestroy(tokurahandle);

	return threadnum;//success
}

int tokura_dgeev_batched_SWBtune(int n, double** A, double** wr, double** wi, int batchCount, float* time)
{
	if (!(n > 0 && n <= 32))
	{
		// check the input matrix size
		return -1;
	}
	if (A == NULL)
	{
		// check that the input matrices are allocated
		return -2;
	}
	if (wr == NULL)
	{
		// check that the output buffer for the eigenvalue real parts is allocated
		return -3;
	}
	if (wi == NULL)
	{
		// check that the output buffer for the eigenvalue imaginary parts is allocated
		return -4;
	}

	tokuraInternalhandle_t* tokurahandle;
	int threadnum;

	tokuraCreate(&tokurahandle);
	tokuraMemorymalloc(tokurahandle, n, batchCount);

	threadnum = tokura_dgeev_batched_SWB_tune(tokurahandle, n, A, wr, wi, batchCount, time);

	tokuraMemoryfree(tokurahandle, n, batchCount);
	tokuraDestroy(tokurahandle);

	return threadnum;//success
}
04dd3906867af60add3d1acb08f57f5b82edaac9.hip
// !!! This is a file automatically generated by hipify!!! /* * watershed-CA-korbes.cu * * Created on: Dec 3, 2011 * from http://parati.dca.fee.unicamp.br/adesso/wiki/watershed/ismm2011_ca/view/ * * cellular automata approach. not as fast as DW. */ #include <hip/hip_runtime.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> namespace nscale { namespace gpu { namespace ca { #define IMUL(a, b) __mul24(a, b) //////////////////////////////////////////////////////////////////////////////// // Kernel configuration //////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2D 16 #define REAL_BLOCK 14 //------------------------------------------------------------------------------ // CUDA error check and print //------------------------------------------------------------------------------ void checkCUDAError(const char *msg); //------------------------------------------------------------------------------ // Texture image //------------------------------------------------------------------------------ texture<float, 2, hipReadModeElementType> f_tex; texture<int, 2, hipReadModeElementType> mins_tex; //------------------------------------------------------------------------------ // Constant memory for neigh //------------------------------------------------------------------------------ __constant__ int c_neigh[16]; //------------------------------------------------------------------------------ // Shared memory for image //------------------------------------------------------------------------------ __shared__ unsigned short s_img[BLOCK_SIZE_2D][BLOCK_SIZE_2D]; __shared__ unsigned short s_lambda[BLOCK_SIZE_2D][BLOCK_SIZE_2D]; __shared__ unsigned short s_label[BLOCK_SIZE_2D][BLOCK_SIZE_2D]; //------------------------------------------------------------------------------ // Round up the division a/b //------------------------------------------------------------------------------ int iDivUp(int a, int b){ return (a % b != 0) ? 
(a / b + 1) : (a / b); } //------------------------------------------------------------------------------ // Generate neighborhood offsets //------------------------------------------------------------------------------ void neighvector(int conn) { int neigh[16]; int i = 0; switch(conn) { case 4: // for Y neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 0; neigh[i++] = 1; // for X neigh[i++] = 0; neigh[i++] = -1; neigh[i++] = 1; neigh[i++] = 0; break; case 8: // for Y neigh[i++] = -1; neigh[i++] = -1; neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 0; neigh[i++] = 1; neigh[i++] = 1; neigh[i++] = 1; // for X neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 1; neigh[i++] = -1; neigh[i++] = 1; neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 1; break; } hipMemcpyToSymbol(c_neigh, neigh, 16*sizeof(int), 0, hipMemcpyHostToDevice); } __device__ float cost(int fp, int fq, int px, int py, int qx, int qy) { if (fp > fq) { int minv = tex2D(mins_tex, px, py); return fp - minv; } else if (fq > fp) { int minv = tex2D(mins_tex, qx, qy); return fq - minv; } else { int minv = tex2D(mins_tex, px, py); int minv2 = tex2D(mins_tex, qx, qy); return ((fp - minv)+(fq - minv2))/2.0; } } //------------------------------------------------------------------------------ // Perform iterations of Ford-Bellmann inside block //------------------------------------------------------------------------------ __global__ void cuFordBellmann(int *label, int *label_next, int *lambda, int *lambda_next, int *flag, int w, int h, int conn) { const int px = REAL_BLOCK*blockIdx.x + threadIdx.x - 1; const int py = REAL_BLOCK*blockIdx.y + threadIdx.y - 1; const int idp = py*w + px; int count = 0; __shared__ int bChanged; if (px < 0 || py < 0 || px >= w || py >= h) { s_img[threadIdx.y][threadIdx.x] = 0xFFFF; s_lambda[threadIdx.y][threadIdx.x] = 0; s_label[threadIdx.y][threadIdx.x] = 0; } else { s_img[threadIdx.y][threadIdx.x] = tex2D(f_tex,(float)px,(float)py); s_lambda[threadIdx.y][threadIdx.x] = lambda[idp]; s_label[threadIdx.y][threadIdx.x] = label[idp]; } bChanged = 1; __syncthreads(); if (px < 0 || py < 0 || px >= w || py >= h || threadIdx.x == 0 || threadIdx.x >= BLOCK_SIZE_2D - 1 || threadIdx.y == 0 || threadIdx.y >= BLOCK_SIZE_2D - 1) return; while (bChanged && count < 28) { bChanged = 0; count++; int fp, fq; int u = 0x00FFFFFF; int ux, uy; int lambdap = s_lambda[threadIdx.y][threadIdx.x]; int idu = -1; fp = s_img[threadIdx.y][threadIdx.x]; for(int pos = 0; pos < conn ; pos++) { int qx = px + c_neigh[pos+conn]; int qy = py + c_neigh[pos]; int sqx = threadIdx.x + c_neigh[pos+conn]; int sqy = threadIdx.y + c_neigh[pos]; if (qx >= 0 && qy >= 0 && qx < w && qy < h) { int lambdaq = s_lambda[sqy][sqx]; fq = s_img[sqy][sqx]; float c = cost(fp, fq, px, py, qx, qy); if (lambdaq + c < u) { u = lambdaq + c; ux = sqx; uy = sqy; idu = 1; } } } int ulabel = 0; if (idu >= 0) ulabel = s_label[uy][ux]; __syncthreads(); if (idu >= 0 && u < lambdap) { s_lambda[threadIdx.y][threadIdx.x] = u; s_label[threadIdx.y][threadIdx.x] = ulabel; *flag += 1; bChanged = 1; } __syncthreads(); } lambda_next[idp] = s_lambda[threadIdx.y][threadIdx.x]; label_next[idp] = s_label[threadIdx.y][threadIdx.x]; } //------------------------------------------------------------------------------ // Initialize the lambda memory on the seeds and find neighborhood minima //------------------------------------------------------------------------------ __global__ void hipInit(int *lambda, int *seeds, int *mins, int w, int h, int conn) { const int px = REAL_BLOCK*blockIdx.x + threadIdx.x - 1; 
const int py = REAL_BLOCK*blockIdx.y + threadIdx.y - 1; const int idp = py*w + px; if (px < 0 || py < 0 || px >= w || py >= h) { s_img[threadIdx.y][threadIdx.x] = 0xFFFF; } else { s_img[threadIdx.y][threadIdx.x] = tex2D(f_tex,(float)px,(float)py); } if (px < 0 || py < 0 || px >= w || py >= h || threadIdx.x == 0 || threadIdx.x >= BLOCK_SIZE_2D - 1 || threadIdx.y == 0 || threadIdx.y >= BLOCK_SIZE_2D - 1) return; lambda[idp] = (seeds[idp] == 0) * 0x00FFFFFF; int minv = 0x7FFFFFFF; int fv; for(int pos = 0; pos < conn ; pos++) { int vx = threadIdx.x + c_neigh[pos+conn]; int vy = threadIdx.y + c_neigh[pos]; fv = s_img[vy][vx]; if (fv < minv) minv = fv; } mins[idp] = minv; } //------------------------------------------------------------------------------ // Watershed by Kauffmann & Piche //------------------------------------------------------------------------------ __host__ float ws_kauffmann(int *label, // output float *f, // input int *seeds, // seeds (regional minima) int w, // width int h, //height int conn) // connectivity (4 or 8) { int *label_d, *label_next_d, *lambda_d, *lambda_next_d, *flag_d, *mins_d; unsigned int timer; float measuredTime; timeval tim; hipArray *f_d; hipArray *mins_a_d; int flag; int sizei = w * h * sizeof(int); int sizec = w * h * sizeof(float); // Setup the grid hierarchy dim3 dimBlock(BLOCK_SIZE_2D,BLOCK_SIZE_2D); dim3 dimGrid(iDivUp(w,REAL_BLOCK),iDivUp(h,REAL_BLOCK)); neighvector(conn); hipChannelFormatDesc desc8 = hipCreateChannelDesc<float>(); checkCUDAError("cudaCreateChannelDesc8"); hipChannelFormatDesc desc32 = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindSigned); checkCUDAError("cudaCreateChannelDesc32"); // Allocate memory hipMallocArray(&f_d, &desc8, w, h); checkCUDAError("hipMallocArray f_d"); hipMemcpyToArray(f_d, 0, 0, f, sizec, hipMemcpyDeviceToDevice); checkCUDAError("hipMemcpyToArray f_d"); hipBindTextureToArray(f_tex, f_d); checkCUDAError("hipBindTextureToArray f_tex"); hipMalloc((void**)&label_d, sizei); hipMemset(label_d, 0, sizei); hipMalloc((void**)&label_next_d, sizei); hipMemcpy(label_next_d, seeds, sizei, hipMemcpyDeviceToDevice); hipMalloc((void**)&lambda_d, sizei); hipMemset(lambda_d, 0, sizei); hipMalloc((void**)&lambda_next_d, sizei); hipMemset(lambda_next_d, 0, sizei); hipMalloc((void**)&mins_d, sizei); hipMemset(mins_d, 0, sizei); hipMalloc((void**)&flag_d, sizeof(int)); gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); // Initialize the lambda image with zeros on minima hipLaunchKernelGGL(( hipInit), dim3(dimGrid), dim3(dimBlock), 0, 0, lambda_next_d, label_next_d, mins_d, w, h, conn); hipDeviceSynchronize(); hipMallocArray(&mins_a_d, &desc32, w, h); checkCUDAError("hipMallocArray mins_a_d"); hipMemcpyToArray(mins_a_d, 0, 0, mins_d, sizei, hipMemcpyDeviceToDevice); checkCUDAError("hipMemcpyToArray mins_a_d"); hipBindTextureToArray(mins_tex, mins_a_d); checkCUDAError("hipBindTextureToArray mins_a_d"); // Iterate until stabilization int iter = 0; do{ iter++; //pyprintf("iter\n"); hipMemset(flag_d, 0, sizeof(int)); hipMemcpy(lambda_d, lambda_next_d, sizei, hipMemcpyDeviceToDevice); hipMemcpy(label_d, label_next_d, sizei, hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( cuFordBellmann), dim3(dimGrid), dim3(dimBlock), 0, 0, label_d, label_next_d, lambda_d, lambda_next_d, flag_d, w, h, conn); hipDeviceSynchronize(); hipMemcpy(&flag, flag_d, sizeof(int), hipMemcpyDeviceToHost); }while(flag > 0 && iter < 2000); //cutStopTimer(timer); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); 
//pyprintf("iter: %d\n", iter); // Copy the labels hipMemcpy(label, label_d, sizei, hipMemcpyDeviceToDevice); // Free and Unbind memory hipFree(mins_d); hipFreeArray(f_d); hipFreeArray(mins_a_d); hipFree(lambda_d); hipFree(lambda_next_d); hipFree(label_d); hipFree(label_next_d); hipFree(flag_d); hipUnbindTexture(f_tex); hipUnbindTexture(mins_tex); return t2-t1; } //------------------------------------------------------------------------------ // Error Check //------------------------------------------------------------------------------ void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if(hipSuccess != err) { printf("Cuda error: %s: %s. \n", msg, hipGetErrorString(err)); exit(-1); } } }}}
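A note on the launch geometry above: each 16x16 thread block (BLOCK_SIZE_2D) loads a one-pixel halo on every side, so only its inner 14x14 region (REAL_BLOCK) produces output, and iDivUp tiles the image with those inner regions. The host-only sketch below (not part of the original file) spells out that arithmetic for a 512x512 image.

#include <cstdio>

// Same rounding-up division as iDivUp in the file above.
static int iDivUpHost(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }

int main()
{
    const int w = 512, h = 512;
    // 16x16 threads per block, but only the inner 14x14 pixels are computed,
    // so the grid needs ceil(512/14) = 37 blocks along each axis.
    printf("grid: %d x %d blocks of 16x16 threads\n",
           iDivUpHost(w, 14), iDivUpHost(h, 14));
    return 0;
}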
04dd3906867af60add3d1acb08f57f5b82edaac9.cu
/* * watershed-CA-korbes.cu * * Created on: Dec 3, 2011 * from http://parati.dca.fee.unicamp.br/adesso/wiki/watershed/ismm2011_ca/view/ * * cellular automata approach. not as fast as DW. */ #include <cuda_runtime.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> namespace nscale { namespace gpu { namespace ca { #define IMUL(a, b) __mul24(a, b) //////////////////////////////////////////////////////////////////////////////// // Kernel configuration //////////////////////////////////////////////////////////////////////////////// #define BLOCK_SIZE_2D 16 #define REAL_BLOCK 14 //------------------------------------------------------------------------------ // CUDA error check and print //------------------------------------------------------------------------------ void checkCUDAError(const char *msg); //------------------------------------------------------------------------------ // Texture image //------------------------------------------------------------------------------ texture<float, 2, cudaReadModeElementType> f_tex; texture<int, 2, cudaReadModeElementType> mins_tex; //------------------------------------------------------------------------------ // Constant memory for neigh //------------------------------------------------------------------------------ __constant__ int c_neigh[16]; //------------------------------------------------------------------------------ // Shared memory for image //------------------------------------------------------------------------------ __shared__ unsigned short s_img[BLOCK_SIZE_2D][BLOCK_SIZE_2D]; __shared__ unsigned short s_lambda[BLOCK_SIZE_2D][BLOCK_SIZE_2D]; __shared__ unsigned short s_label[BLOCK_SIZE_2D][BLOCK_SIZE_2D]; //------------------------------------------------------------------------------ // Round up the division a/b //------------------------------------------------------------------------------ int iDivUp(int a, int b){ return (a % b != 0) ? 
(a / b + 1) : (a / b); } //------------------------------------------------------------------------------ // Generate neighborhood offsets //------------------------------------------------------------------------------ void neighvector(int conn) { int neigh[16]; int i = 0; switch(conn) { case 4: // for Y neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 0; neigh[i++] = 1; // for X neigh[i++] = 0; neigh[i++] = -1; neigh[i++] = 1; neigh[i++] = 0; break; case 8: // for Y neigh[i++] = -1; neigh[i++] = -1; neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 0; neigh[i++] = 1; neigh[i++] = 1; neigh[i++] = 1; // for X neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 1; neigh[i++] = -1; neigh[i++] = 1; neigh[i++] = -1; neigh[i++] = 0; neigh[i++] = 1; break; } cudaMemcpyToSymbol(c_neigh, neigh, 16*sizeof(int), 0, cudaMemcpyHostToDevice); } __device__ float cost(int fp, int fq, int px, int py, int qx, int qy) { if (fp > fq) { int minv = tex2D(mins_tex, px, py); return fp - minv; } else if (fq > fp) { int minv = tex2D(mins_tex, qx, qy); return fq - minv; } else { int minv = tex2D(mins_tex, px, py); int minv2 = tex2D(mins_tex, qx, qy); return ((fp - minv)+(fq - minv2))/2.0; } } //------------------------------------------------------------------------------ // Perform iterations of Ford-Bellmann inside block //------------------------------------------------------------------------------ __global__ void cuFordBellmann(int *label, int *label_next, int *lambda, int *lambda_next, int *flag, int w, int h, int conn) { const int px = REAL_BLOCK*blockIdx.x + threadIdx.x - 1; const int py = REAL_BLOCK*blockIdx.y + threadIdx.y - 1; const int idp = py*w + px; int count = 0; __shared__ int bChanged; if (px < 0 || py < 0 || px >= w || py >= h) { s_img[threadIdx.y][threadIdx.x] = 0xFFFF; s_lambda[threadIdx.y][threadIdx.x] = 0; s_label[threadIdx.y][threadIdx.x] = 0; } else { s_img[threadIdx.y][threadIdx.x] = tex2D(f_tex,(float)px,(float)py); s_lambda[threadIdx.y][threadIdx.x] = lambda[idp]; s_label[threadIdx.y][threadIdx.x] = label[idp]; } bChanged = 1; __syncthreads(); if (px < 0 || py < 0 || px >= w || py >= h || threadIdx.x == 0 || threadIdx.x >= BLOCK_SIZE_2D - 1 || threadIdx.y == 0 || threadIdx.y >= BLOCK_SIZE_2D - 1) return; while (bChanged && count < 28) { bChanged = 0; count++; int fp, fq; int u = 0x00FFFFFF; int ux, uy; int lambdap = s_lambda[threadIdx.y][threadIdx.x]; int idu = -1; fp = s_img[threadIdx.y][threadIdx.x]; for(int pos = 0; pos < conn ; pos++) { int qx = px + c_neigh[pos+conn]; int qy = py + c_neigh[pos]; int sqx = threadIdx.x + c_neigh[pos+conn]; int sqy = threadIdx.y + c_neigh[pos]; if (qx >= 0 && qy >= 0 && qx < w && qy < h) { int lambdaq = s_lambda[sqy][sqx]; fq = s_img[sqy][sqx]; float c = cost(fp, fq, px, py, qx, qy); if (lambdaq + c < u) { u = lambdaq + c; ux = sqx; uy = sqy; idu = 1; } } } int ulabel = 0; if (idu >= 0) ulabel = s_label[uy][ux]; __syncthreads(); if (idu >= 0 && u < lambdap) { s_lambda[threadIdx.y][threadIdx.x] = u; s_label[threadIdx.y][threadIdx.x] = ulabel; *flag += 1; bChanged = 1; } __syncthreads(); } lambda_next[idp] = s_lambda[threadIdx.y][threadIdx.x]; label_next[idp] = s_label[threadIdx.y][threadIdx.x]; } //------------------------------------------------------------------------------ // Initialize the lambda memory on the seeds and find neighborhood minima //------------------------------------------------------------------------------ __global__ void cuInit(int *lambda, int *seeds, int *mins, int w, int h, int conn) { const int px = REAL_BLOCK*blockIdx.x + threadIdx.x - 1; 
const int py = REAL_BLOCK*blockIdx.y + threadIdx.y - 1; const int idp = py*w + px; if (px < 0 || py < 0 || px >= w || py >= h) { s_img[threadIdx.y][threadIdx.x] = 0xFFFF; } else { s_img[threadIdx.y][threadIdx.x] = tex2D(f_tex,(float)px,(float)py); } if (px < 0 || py < 0 || px >= w || py >= h || threadIdx.x == 0 || threadIdx.x >= BLOCK_SIZE_2D - 1 || threadIdx.y == 0 || threadIdx.y >= BLOCK_SIZE_2D - 1) return; lambda[idp] = (seeds[idp] == 0) * 0x00FFFFFF; int minv = 0x7FFFFFFF; int fv; for(int pos = 0; pos < conn ; pos++) { int vx = threadIdx.x + c_neigh[pos+conn]; int vy = threadIdx.y + c_neigh[pos]; fv = s_img[vy][vx]; if (fv < minv) minv = fv; } mins[idp] = minv; } //------------------------------------------------------------------------------ // Watershed by Kauffmann & Piche //------------------------------------------------------------------------------ __host__ float ws_kauffmann(int *label, // output float *f, // input int *seeds, // seeds (regional minima) int w, // width int h, //height int conn) // connectivity (4 or 8) { int *label_d, *label_next_d, *lambda_d, *lambda_next_d, *flag_d, *mins_d; unsigned int timer; float measuredTime; timeval tim; cudaArray *f_d; cudaArray *mins_a_d; int flag; int sizei = w * h * sizeof(int); int sizec = w * h * sizeof(float); // Setup the grid hierarchy dim3 dimBlock(BLOCK_SIZE_2D,BLOCK_SIZE_2D); dim3 dimGrid(iDivUp(w,REAL_BLOCK),iDivUp(h,REAL_BLOCK)); neighvector(conn); cudaChannelFormatDesc desc8 = cudaCreateChannelDesc<float>(); checkCUDAError("cudaCreateChannelDesc8"); cudaChannelFormatDesc desc32 = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindSigned); checkCUDAError("cudaCreateChannelDesc32"); // Allocate memory cudaMallocArray(&f_d, &desc8, w, h); checkCUDAError("cudaMallocArray f_d"); cudaMemcpyToArray(f_d, 0, 0, f, sizec, cudaMemcpyDeviceToDevice); checkCUDAError("cudaMemcpyToArray f_d"); cudaBindTextureToArray(f_tex, f_d); checkCUDAError("cudaBindTextureToArray f_tex"); cudaMalloc((void**)&label_d, sizei); cudaMemset(label_d, 0, sizei); cudaMalloc((void**)&label_next_d, sizei); cudaMemcpy(label_next_d, seeds, sizei, cudaMemcpyDeviceToDevice); cudaMalloc((void**)&lambda_d, sizei); cudaMemset(lambda_d, 0, sizei); cudaMalloc((void**)&lambda_next_d, sizei); cudaMemset(lambda_next_d, 0, sizei); cudaMalloc((void**)&mins_d, sizei); cudaMemset(mins_d, 0, sizei); cudaMalloc((void**)&flag_d, sizeof(int)); gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); // Initialize the lambda image with zeros on minima cuInit<<<dimGrid, dimBlock>>>(lambda_next_d, label_next_d, mins_d, w, h, conn); cudaThreadSynchronize(); cudaMallocArray(&mins_a_d, &desc32, w, h); checkCUDAError("cudaMallocArray mins_a_d"); cudaMemcpyToArray(mins_a_d, 0, 0, mins_d, sizei, cudaMemcpyDeviceToDevice); checkCUDAError("cudaMemcpyToArray mins_a_d"); cudaBindTextureToArray(mins_tex, mins_a_d); checkCUDAError("cudaBindTextureToArray mins_a_d"); // Iterate until stabilization int iter = 0; do{ iter++; //pyprintf("iter\n"); cudaMemset(flag_d, 0, sizeof(int)); cudaMemcpy(lambda_d, lambda_next_d, sizei, cudaMemcpyDeviceToDevice); cudaMemcpy(label_d, label_next_d, sizei, cudaMemcpyDeviceToDevice); cuFordBellmann<<<dimGrid, dimBlock>>>(label_d, label_next_d, lambda_d, lambda_next_d, flag_d, w, h, conn); cudaThreadSynchronize(); cudaMemcpy(&flag, flag_d, sizeof(int), cudaMemcpyDeviceToHost); }while(flag > 0 && iter < 2000); //cutStopTimer(timer); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); //pyprintf("iter: %d\n", iter); // Copy 
the labels cudaMemcpy(label, label_d, sizei, cudaMemcpyDeviceToDevice); // Free and Unbind memory cudaFree(mins_d); cudaFreeArray(f_d); cudaFreeArray(mins_a_d); cudaFree(lambda_d); cudaFree(lambda_next_d); cudaFree(label_d); cudaFree(label_next_d); cudaFree(flag_d); cudaUnbindTexture(f_tex); cudaUnbindTexture(mins_tex); return t2-t1; } //------------------------------------------------------------------------------ // Error Check //------------------------------------------------------------------------------ void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if(cudaSuccess != err) { printf("Cuda error: %s: %s. \n", msg, cudaGetErrorString(err)); exit(-1); } } }}}
9ef7429ab75f093ffb13500f2d523434c18984d0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <unistd.h> #include <string> #include <hip/hip_runtime.h> #define ThreadNum 256 #define BlockNum 16 __global__ void printOut(char *string) { printf("%s\n", string); } size_t getFileSize(char *filename) { struct stat st; stat(filename, &st); return st.st_size; } void parsing(char *aim, long int **offset_table, int *entry) { int limit = 1024; int i; long int *tmp_offset = (long int*) malloc(sizeof(long int) * limit); char *token = strtok(aim, "\n"); for (i = 0; token != NULL; i ++) { if (i == limit) { limit += 1024; tmp_offset = (long int*) realloc(tmp_offset, sizeof(long int) * limit); } tmp_offset[i] = token - aim; token = strtok(NULL, "\n"); } printf("Count %d\n", i); // realloc table tmp_offset = (long int*) realloc(tmp_offset, sizeof(long int) * i); // assign & return *offset_table = tmp_offset; *entry = i; } __device__ int strlen(char *s) { int i = 0; while (s[i] != '\0') i ++; return i; } __device__ char *strstrDevice(char *a, char *b) { int i, j; int a_len = strlen(a); int b_len = strlen(b); int loop_limit = a_len - b_len + 1; for (i = 0; i < loop_limit; i ++) { for (j = 0; j < b_len && a[i + j] == b[j]; j ++); if (j == b_len) return a + i; } return NULL; } __global__ void matching(char *aim, char *string, long int *offset_table, int entry, int base, int *result) { int t_id = threadIdx.x; int b_id = blockIdx.x; int b_dim = blockDim.x; int index = base + b_id * b_dim + t_id; //int aim_len = offset_table[index + 1] - offset_table[index]; //if (index < entry && strstrDevice(string + offset_table[index], aim_len, "apple", 5) != NULL) { if (index < entry && strstrDevice(string + offset_table[index], aim) != NULL) { result[index] = 1; } else { result[index] = 0; } } int myCmp(const void *a, const void *b) { return (*(int*) a) - (*(int*) b); } int main(int argc, char *argv[]) { char *filename = argv[1]; int fd = open(filename, O_RDONLY, 0644); // get mmap data size_t file_len = getFileSize(filename) + 1; char *filecontent = (char*) mmap(NULL, file_len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); filecontent[file_len - 1] = '\0'; // parsing long int *offset_table; int entry; parsing(filecontent, &offset_table, &entry); // copy data to device char *HD_filecontent; hipMalloc(&HD_filecontent, file_len); hipMemcpy(HD_filecontent, filecontent, file_len, hipMemcpyHostToDevice); // copy offset table to device long int *D_offset_table; hipMalloc(&D_offset_table, sizeof(long int) * entry); hipMemcpy(D_offset_table, offset_table, sizeof(long int) * entry, hipMemcpyHostToDevice); // matching int round_limit = ceil(entry / (float) (ThreadNum * BlockNum)); int i; int *result; hipMallocManaged(&result, sizeof(int) * entry); char *aim; hipMallocManaged(&aim, sizeof(char) * 6); strcpy(aim, "apple"); hipDeviceSynchronize(); for (i = 0; i < round_limit; i ++) { hipLaunchKernelGGL(( matching), dim3(BlockNum), dim3(ThreadNum), 0, 0, aim, HD_filecontent, D_offset_table, entry, i * ThreadNum * BlockNum, result); } hipDeviceSynchronize(); qsort(result, entry, sizeof(int), myCmp); return 0; }
9ef7429ab75f093ffb13500f2d523434c18984d0.cu
#include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <unistd.h> #include <string> #include <cuda.h> #define ThreadNum 256 #define BlockNum 16 __global__ void printOut(char *string) { printf("%s\n", string); } size_t getFileSize(char *filename) { struct stat st; stat(filename, &st); return st.st_size; } void parsing(char *aim, long int **offset_table, int *entry) { int limit = 1024; int i; long int *tmp_offset = (long int*) malloc(sizeof(long int) * limit); char *token = strtok(aim, "\n"); for (i = 0; token != NULL; i ++) { if (i == limit) { limit += 1024; tmp_offset = (long int*) realloc(tmp_offset, sizeof(long int) * limit); } tmp_offset[i] = token - aim; token = strtok(NULL, "\n"); } printf("Count %d\n", i); // realloc table tmp_offset = (long int*) realloc(tmp_offset, sizeof(long int) * i); // assign & return *offset_table = tmp_offset; *entry = i; } __device__ int strlen(char *s) { int i = 0; while (s[i] != '\0') i ++; return i; } __device__ char *strstrDevice(char *a, char *b) { int i, j; int a_len = strlen(a); int b_len = strlen(b); int loop_limit = a_len - b_len + 1; for (i = 0; i < loop_limit; i ++) { for (j = 0; j < b_len && a[i + j] == b[j]; j ++); if (j == b_len) return a + i; } return NULL; } __global__ void matching(char *aim, char *string, long int *offset_table, int entry, int base, int *result) { int t_id = threadIdx.x; int b_id = blockIdx.x; int b_dim = blockDim.x; int index = base + b_id * b_dim + t_id; //int aim_len = offset_table[index + 1] - offset_table[index]; //if (index < entry && strstrDevice(string + offset_table[index], aim_len, "apple", 5) != NULL) { if (index < entry && strstrDevice(string + offset_table[index], aim) != NULL) { result[index] = 1; } else { result[index] = 0; } } int myCmp(const void *a, const void *b) { return (*(int*) a) - (*(int*) b); } int main(int argc, char *argv[]) { char *filename = argv[1]; int fd = open(filename, O_RDONLY, 0644); // get mmap data size_t file_len = getFileSize(filename) + 1; char *filecontent = (char*) mmap(NULL, file_len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); filecontent[file_len - 1] = '\0'; // parsing long int *offset_table; int entry; parsing(filecontent, &offset_table, &entry); // copy data to device char *HD_filecontent; cudaMalloc(&HD_filecontent, file_len); cudaMemcpy(HD_filecontent, filecontent, file_len, cudaMemcpyHostToDevice); // copy offset table to device long int *D_offset_table; cudaMalloc(&D_offset_table, sizeof(long int) * entry); cudaMemcpy(D_offset_table, offset_table, sizeof(long int) * entry, cudaMemcpyHostToDevice); // matching int round_limit = ceil(entry / (float) (ThreadNum * BlockNum)); int i; int *result; cudaMallocManaged(&result, sizeof(int) * entry); char *aim; cudaMallocManaged(&aim, sizeof(char) * 6); strcpy(aim, "apple"); cudaDeviceSynchronize(); for (i = 0; i < round_limit; i ++) { matching<<<BlockNum, ThreadNum>>>(aim, HD_filecontent, D_offset_table, entry, i * ThreadNum * BlockNum, result); } cudaDeviceSynchronize(); qsort(result, entry, sizeof(int), myCmp); return 0; }
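strstrDevice above is a plain O(|line| * |pattern|) scan. A host-side mirror of the same logic, useful for checking the 0/1 flags the matching kernel writes into result[], is sketched below (illustrative only, not part of the original file).

#include <string.h>

// Return 1 if `needle` occurs in the NUL-terminated string `line`, else 0;
// the same brute-force scan as strstrDevice.
int lineMatchesHost(const char *line, const char *needle)
{
    int a_len = (int)strlen(line);
    int b_len = (int)strlen(needle);
    for (int i = 0; i + b_len <= a_len; i++) {
        int j = 0;
        while (j < b_len && line[i + j] == needle[j]) j++;
        if (j == b_len) return 1;
    }
    return 0;
}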
98312742570111b2448cbadf51e66b79ac6a36a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Oleh Semeniv ([email protected]) // #include <helpers/PointersManager.h> #include <math/platformmath.h> #include <math/templatemath.h> #include <ops/declarable/helpers/updatersHelpers.h> #include <system/op_boilerplate.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL void amsGradUpdaterCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vinv, const sd::LongType* invShapeInfo, const void* vinm, const sd::LongType* inmShapeInfo, const void* vinh, const sd::LongType* inhShapeInfo, void* vz, const sd::LongType* zShapeInfo, void* vstV, const sd::LongType* stvShapeInfo, void* vstM, const sd::LongType* stmShapeInfo, void* vstH, const sd::LongType* sthShapeInfo, const T lr, const T beta1, const T beta2, const T epsilon, const T iteration) { const auto grad = reinterpret_cast<const T*>(vx); const auto initV = reinterpret_cast<const T*>(vinv); const auto initM = reinterpret_cast<const T*>(vinm); const auto initH = reinterpret_cast<const T*>(vinh); auto up = reinterpret_cast<T*>(vz); auto stV = reinterpret_cast<T*>(vstV); auto stM = reinterpret_cast<T*>(vstM); auto stH = reinterpret_cast<T*>(vstH); __shared__ sd::LongType xLen; __shared__ T mbeta1, mbeta2, epsilonT; __shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame, bXInHSame, bXStHSame; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); epsilonT = lr * sd::math::sd_sqrt<T, T>(1.0 - sd::math::sd_pow<T, T, T>(beta2, (iteration + 1))) / (1.0 - sd::math::sd_pow<T, T, T>(beta1, (iteration + 1))); if (sd::math::sd_isnan(epsilonT) || 0 == epsilonT || sd::math::sd_isinf(epsilonT)) epsilonT = epsilon; mbeta1 = (1 - beta1); mbeta2 = (1 - beta2); bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) && 1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) && 1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo) && 1 == shape::elementWiseStride(sthShapeInfo) && 1 == shape::elementWiseStride(inhShapeInfo); bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(zShapeInfo) == shape::order(stmShapeInfo) && shape::order(stmShapeInfo) == shape::order(inmShapeInfo) && shape::order(inmShapeInfo) == shape::order(stvShapeInfo) && shape::order(stvShapeInfo) == shape::order(invShapeInfo) && shape::order(invShapeInfo) == shape::order(sthShapeInfo) && shape::order(sthShapeInfo) == shape::order(inhShapeInfo); bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); 
bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo); bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo); bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo); bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo); bXInHSame = shape::haveSameShapeAndStrides(xShapeInfo, inhShapeInfo); bXStHSame = shape::haveSameShapeAndStrides(xShapeInfo, sthShapeInfo); } __syncthreads(); int coords[SD_MAX_RANK]; for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) { auto xOffset = i, zOffset = i, initMOffset = i, initVOffset = i, initHOffset = i, stMOffset = i, stVOffset = i, stHOffset = i; if (!bEWS || !bOrdering) { shape::index2coords(i, xShapeInfo, coords); xOffset = shape::getOffset(xShapeInfo, coords); zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords); initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords); stMOffset = bXStMSame ? xOffset : shape::getOffset(stmShapeInfo, coords); initVOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords); stVOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords); initHOffset = bXInHSame ? xOffset : shape::getOffset(inhShapeInfo, coords); stHOffset = bXStHSame ? xOffset : shape::getOffset(sthShapeInfo, coords); } stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * mbeta1; stV[stVOffset] = beta2 * initV[initVOffset] + grad[xOffset] * grad[xOffset] * mbeta2; stH[stHOffset] = sd::math::sd_max(initH[initHOffset], stV[stVOffset]); up[zOffset] = epsilonT * stM[stMOffset] / (sd::math::sd_sqrt<T, T>(stH[stHOffset]) + epsilon); } } /////////////////////////////////////////////////////////////////// template <typename T> void amsGradUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, const void* vinv, const sd::LongType* invShapeInfo, const void* vinm, const sd::LongType* inmShapeInfo, const void* vinh, const sd::LongType* inhShapeInfo, void* vz, const sd::LongType* zShapeInfo, void* vstV, const sd::LongType* stvShapeInfo, void* vstM, const sd::LongType* stmShapeInfo, void* vstH, const sd::LongType* sthShapeInfo, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { const T lr = static_cast<T>(dLr); const T beta1 = static_cast<T>(dBeta1); const T beta2 = static_cast<T>(dBeta2); const T epsilon = static_cast<T>(dEpsilon); const T iteration = static_cast<T>(nIteration); hipLaunchKernelGGL(( amsGradUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vinh, inhShapeInfo, vz, zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo, vstH, sthShapeInfo, lr, beta1, beta2, epsilon, iteration); } /////////////////////////////////////////////////////////////////// void updaterAmsGrad(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateV, const NDArray& initStateM, const NDArray& initStateH, NDArray& update, NDArray& stateV, NDArray& stateM, NDArray& stateH, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { PointersManager manager(context, "amsGradUpdater"); const int threadsPerBlock = SD_MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({&update, &stateV, &stateM, &stateH}, {&gradient, &initStateV, &initStateM, 
&initStateH}); BUILD_SINGLE_SELECTOR( gradient.dataType(), amsGradUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(), initStateV.specialBuffer(), initStateV.specialShapeInfo(), initStateM.specialBuffer(), initStateM.specialShapeInfo(), initStateH.specialBuffer(), initStateH.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(), stateV.specialBuffer(), stateV.specialShapeInfo(), stateM.specialBuffer(), stateM.specialShapeInfo(), stateH.specialBuffer(), stateH.specialShapeInfo(), dLr, dBeta1, dBeta2, dEpsilon, nIteration), SD_FLOAT_TYPES); NDArray::registerSpecialUse({&update, &stateV, &stateM, &stateH}, {&gradient, &initStateV, &initStateM, &initStateH}); manager.synchronize(); } } // namespace helpers } // namespace ops } // namespace sd
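Restated in conventional notation, the per-element update computed by amsGradUpdaterCuda above is the AMSGrad rule, with g_t the gradient element, t = iteration + 1, eta the learning rate lr, and m, v, and v-hat held in stateM, stateV, and stateH (the kernel also falls back to plain epsilon for the scaled rate when that factor evaluates to 0, NaN, or Inf):

m_t = \beta_1 m_{t-1} + (1 - \beta_1)\, g_t
v_t = \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2
\hat{v}_t = \max(\hat{v}_{t-1}, v_t)
u_t = \eta \, \frac{\sqrt{1 - \beta_2^{\,t}}}{1 - \beta_1^{\,t}} \cdot \frac{m_t}{\sqrt{\hat{v}_t} + \epsilon}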
98312742570111b2448cbadf51e66b79ac6a36a4.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Oleh Semeniv ([email protected]) // #include <helpers/PointersManager.h> #include <math/platformmath.h> #include <math/templatemath.h> #include <ops/declarable/helpers/updatersHelpers.h> #include <system/op_boilerplate.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL void amsGradUpdaterCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vinv, const sd::LongType* invShapeInfo, const void* vinm, const sd::LongType* inmShapeInfo, const void* vinh, const sd::LongType* inhShapeInfo, void* vz, const sd::LongType* zShapeInfo, void* vstV, const sd::LongType* stvShapeInfo, void* vstM, const sd::LongType* stmShapeInfo, void* vstH, const sd::LongType* sthShapeInfo, const T lr, const T beta1, const T beta2, const T epsilon, const T iteration) { const auto grad = reinterpret_cast<const T*>(vx); const auto initV = reinterpret_cast<const T*>(vinv); const auto initM = reinterpret_cast<const T*>(vinm); const auto initH = reinterpret_cast<const T*>(vinh); auto up = reinterpret_cast<T*>(vz); auto stV = reinterpret_cast<T*>(vstV); auto stM = reinterpret_cast<T*>(vstM); auto stH = reinterpret_cast<T*>(vstH); __shared__ sd::LongType xLen; __shared__ T mbeta1, mbeta2, epsilonT; __shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame, bXInHSame, bXStHSame; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); epsilonT = lr * sd::math::sd_sqrt<T, T>(1.0 - sd::math::sd_pow<T, T, T>(beta2, (iteration + 1))) / (1.0 - sd::math::sd_pow<T, T, T>(beta1, (iteration + 1))); if (sd::math::sd_isnan(epsilonT) || 0 == epsilonT || sd::math::sd_isinf(epsilonT)) epsilonT = epsilon; mbeta1 = (1 - beta1); mbeta2 = (1 - beta2); bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) && 1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) && 1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo) && 1 == shape::elementWiseStride(sthShapeInfo) && 1 == shape::elementWiseStride(inhShapeInfo); bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(zShapeInfo) == shape::order(stmShapeInfo) && shape::order(stmShapeInfo) == shape::order(inmShapeInfo) && shape::order(inmShapeInfo) == shape::order(stvShapeInfo) && shape::order(stvShapeInfo) == shape::order(invShapeInfo) && shape::order(invShapeInfo) == shape::order(sthShapeInfo) && shape::order(sthShapeInfo) == shape::order(inhShapeInfo); bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo); bXStUSame = 
shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo); bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo); bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo); bXInHSame = shape::haveSameShapeAndStrides(xShapeInfo, inhShapeInfo); bXStHSame = shape::haveSameShapeAndStrides(xShapeInfo, sthShapeInfo); } __syncthreads(); int coords[SD_MAX_RANK]; for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) { auto xOffset = i, zOffset = i, initMOffset = i, initVOffset = i, initHOffset = i, stMOffset = i, stVOffset = i, stHOffset = i; if (!bEWS || !bOrdering) { shape::index2coords(i, xShapeInfo, coords); xOffset = shape::getOffset(xShapeInfo, coords); zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords); initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords); stMOffset = bXStMSame ? xOffset : shape::getOffset(stmShapeInfo, coords); initVOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords); stVOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords); initHOffset = bXInHSame ? xOffset : shape::getOffset(inhShapeInfo, coords); stHOffset = bXStHSame ? xOffset : shape::getOffset(sthShapeInfo, coords); } stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * mbeta1; stV[stVOffset] = beta2 * initV[initVOffset] + grad[xOffset] * grad[xOffset] * mbeta2; stH[stHOffset] = sd::math::sd_max(initH[initHOffset], stV[stVOffset]); up[zOffset] = epsilonT * stM[stMOffset] / (sd::math::sd_sqrt<T, T>(stH[stHOffset]) + epsilon); } } /////////////////////////////////////////////////////////////////// template <typename T> void amsGradUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo, const void* vinv, const sd::LongType* invShapeInfo, const void* vinm, const sd::LongType* inmShapeInfo, const void* vinh, const sd::LongType* inhShapeInfo, void* vz, const sd::LongType* zShapeInfo, void* vstV, const sd::LongType* stvShapeInfo, void* vstM, const sd::LongType* stmShapeInfo, void* vstH, const sd::LongType* sthShapeInfo, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { const T lr = static_cast<T>(dLr); const T beta1 = static_cast<T>(dBeta1); const T beta2 = static_cast<T>(dBeta2); const T epsilon = static_cast<T>(dEpsilon); const T iteration = static_cast<T>(nIteration); amsGradUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>( vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vinh, inhShapeInfo, vz, zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo, vstH, sthShapeInfo, lr, beta1, beta2, epsilon, iteration); } /////////////////////////////////////////////////////////////////// void updaterAmsGrad(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateV, const NDArray& initStateM, const NDArray& initStateH, NDArray& update, NDArray& stateV, NDArray& stateM, NDArray& stateH, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { PointersManager manager(context, "amsGradUpdater"); const int threadsPerBlock = SD_MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({&update, &stateV, &stateM, &stateH}, {&gradient, &initStateV, &initStateM, &initStateH}); BUILD_SINGLE_SELECTOR( gradient.dataType(), amsGradUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, 
context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(), initStateV.specialBuffer(), initStateV.specialShapeInfo(), initStateM.specialBuffer(), initStateM.specialShapeInfo(), initStateH.specialBuffer(), initStateH.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(), stateV.specialBuffer(), stateV.specialShapeInfo(), stateM.specialBuffer(), stateM.specialShapeInfo(), stateH.specialBuffer(), stateH.specialShapeInfo(), dLr, dBeta1, dBeta2, dEpsilon, nIteration), SD_FLOAT_TYPES); NDArray::registerSpecialUse({&update, &stateV, &stateM, &stateH}, {&gradient, &initStateV, &initStateM, &initStateH}); manager.synchronize(); } } // namespace helpers } // namespace ops } // namespace sd
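// The kernel above fuses the AMSGrad moment updates and the bias-corrected step
// size into a single element-wise pass. The sketch below restates that update in
// minimal, flat-array form so the math is easier to follow; amsGradStepKernel,
// t (the 1-based step) and the in-place state buffers are illustrative only, and
// the shape/stride handling plus the NaN/Inf fallback of the real kernel are
// omitted.
//
//   m_t  = beta1 * m_{t-1} + (1 - beta1) * g
//   v_t  = beta2 * v_{t-1} + (1 - beta2) * g^2
//   h_t  = max(h_{t-1}, v_t)
//   upd  = lr * sqrt(1 - beta2^t) / (1 - beta1^t) * m_t / (sqrt(h_t) + eps)
__global__ void amsGradStepKernel(const float* g, float* m, float* v, float* h,
                                  float* upd, float lr, float beta1, float beta2,
                                  float eps, int t, long long n) {
  // Bias-corrected step size, matching epsilonT in the kernel above.
  const float stepSize = lr * sqrtf(1.f - powf(beta2, (float)t)) /
                         (1.f - powf(beta1, (float)t));
  for (long long i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += (long long)gridDim.x * blockDim.x) {
    const float grad = g[i];
    m[i] = beta1 * m[i] + (1.f - beta1) * grad;         // first moment
    v[i] = beta2 * v[i] + (1.f - beta2) * grad * grad;  // second moment
    h[i] = fmaxf(h[i], v[i]);                           // non-decreasing max of v
    upd[i] = stepSize * m[i] / (sqrtf(h[i]) + eps);     // parameter delta
  }
}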
43754d11efd9efcb9ed407e5325a0f8fce93d5a6.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <cudnn.h> #include "../stdafx.h" #include "../arithmetic.h" using namespace SN_Base; /// tensor - input data and output data of each node of the network. Tensor::Tensor(const snSize& sz) : sz_(sz){ size_t ssz = sz.size(); if (ssz > 0){ cuAssert(hipMalloc(&dataGPU_, ssz * sizeof(snFloat))); cuAssert(hipMemset(dataGPU_, 0, ssz * sizeof(snFloat))); } } Tensor::~Tensor(){ if (dataGPU_) cuAssert(hipFree(dataGPU_)); if (dataCPU_) free(dataCPU_); } Tensor::Tensor(const Tensor& other){ setDataGPU(other.getDataGPU(), other.size()); } Tensor& Tensor::operator=(const Tensor& other){ setDataGPU(other.getDataGPU(), other.size()); return *this; } Tensor& Tensor::operator+=(const Tensor& other){ ASSERT_MESS(other == *this, ""); summ(sz_, dataGPU_, other.getDataGPU()); return *this; } Tensor& Tensor::operator-=(const Tensor& other){ ASSERT_MESS(other == *this, ""); difference(sz_, dataGPU_, other.getDataGPU()); return *this; } void Tensor::setDataGPU(const snFloat* data, const snSize& nsz){ size_t nnsz = nsz.size(); ASSERT_MESS(data && (nnsz > 0), ""); if (sz_.size() < nnsz){ if (dataGPU_) cuAssert(hipFree(dataGPU_)); cuAssert(hipMalloc(&dataGPU_, nnsz * sizeof(snFloat))); } cuAssert(hipMemcpy(dataGPU_, data, nnsz * sizeof(snFloat), hipMemcpyKind::hipMemcpyDeviceToDevice)); sz_ = nsz; } void Tensor::setDataCPU(const snFloat* data, const snSize& nsz){ size_t nnsz = nsz.size(); ASSERT_MESS(data && (nnsz > 0), ""); if (sz_.size() < nnsz){ if (dataGPU_) cuAssert(hipFree(dataGPU_)); cuAssert(hipMalloc(&dataGPU_, nnsz * sizeof(snFloat))); } cuAssert(hipMemcpy(dataGPU_, data, nnsz * sizeof(snFloat), hipMemcpyKind::hipMemcpyHostToDevice)); sz_ = nsz; } snFloat* Tensor::getDataGPU() const{ return dataGPU_; } snFloat* Tensor::getDataCPU() const{ size_t csz = sz_.size(); dataCPU_ = (snFloat*)realloc(dataCPU_, csz * sizeof(snFloat)); cuAssert(hipMemcpy(dataCPU_, dataGPU_, csz * sizeof(snFloat), hipMemcpyKind::hipMemcpyDeviceToHost)); return dataCPU_; } void Tensor::resize(const snSize& nsz){ size_t nnsz = nsz.size(), csz = sz_.size(); ASSERT_MESS(nnsz > 0, ""); if (csz < nnsz){ snFloat* mem = nullptr; cuAssert(hipMalloc(&mem, nnsz * sizeof(snFloat))); if (dataGPU_){ if (csz > 0) cuAssert(hipMemcpy(mem, dataGPU_, csz * sizeof(snFloat), hipMemcpyKind::hipMemcpyDeviceToDevice)); cuAssert(hipFree(dataGPU_)); } dataGPU_ = mem; cuAssert(hipMemset(dataGPU_ + csz, 0, (nnsz - csz) * sizeof(snFloat))); } sz_ = nsz; }
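// Tensor::resize() above follows a grow-only allocation policy: memory is
// reallocated only when the requested size exceeds the current allocation, the
// old contents are preserved, and the newly exposed tail is zero-filled. The
// helper below is a minimal sketch of that pattern in isolation; growBuffer is
// an illustrative name, and error handling is reduced to the cuAssert macro the
// file already uses (assumed to wrap hip error codes).
static void growBuffer(float*& devPtr, size_t curCount, size_t newCount) {
  if (newCount <= curCount) return;                    // shrink requests keep the buffer
  float* mem = nullptr;
  cuAssert(hipMalloc(&mem, newCount * sizeof(float)));
  if (devPtr) {
    if (curCount > 0)
      cuAssert(hipMemcpy(mem, devPtr, curCount * sizeof(float),
                         hipMemcpyDeviceToDevice));    // keep existing values
    cuAssert(hipFree(devPtr));
  }
  devPtr = mem;
  cuAssert(hipMemset(devPtr + curCount, 0,
                     (newCount - curCount) * sizeof(float)));  // zero the new tail
}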
43754d11efd9efcb9ed407e5325a0f8fce93d5a6.cu
 #include <cuda_runtime.h> #include <cudnn.h> #include "../stdafx.h" #include "../arithmetic.h" using namespace SN_Base; /// tensor - input data and output data of each node of the network. Tensor::Tensor(const snSize& sz) : sz_(sz){ size_t ssz = sz.size(); if (ssz > 0){ cuAssert(cudaMalloc(&dataGPU_, ssz * sizeof(snFloat))); cuAssert(cudaMemset(dataGPU_, 0, ssz * sizeof(snFloat))); } } Tensor::~Tensor(){ if (dataGPU_) cuAssert(cudaFree(dataGPU_)); if (dataCPU_) free(dataCPU_); } Tensor::Tensor(const Tensor& other){ setDataGPU(other.getDataGPU(), other.size()); } Tensor& Tensor::operator=(const Tensor& other){ setDataGPU(other.getDataGPU(), other.size()); return *this; } Tensor& Tensor::operator+=(const Tensor& other){ ASSERT_MESS(other == *this, ""); summ(sz_, dataGPU_, other.getDataGPU()); return *this; } Tensor& Tensor::operator-=(const Tensor& other){ ASSERT_MESS(other == *this, ""); difference(sz_, dataGPU_, other.getDataGPU()); return *this; } void Tensor::setDataGPU(const snFloat* data, const snSize& nsz){ size_t nnsz = nsz.size(); ASSERT_MESS(data && (nnsz > 0), ""); if (sz_.size() < nnsz){ if (dataGPU_) cuAssert(cudaFree(dataGPU_)); cuAssert(cudaMalloc(&dataGPU_, nnsz * sizeof(snFloat))); } cuAssert(cudaMemcpy(dataGPU_, data, nnsz * sizeof(snFloat), cudaMemcpyKind::cudaMemcpyDeviceToDevice)); sz_ = nsz; } void Tensor::setDataCPU(const snFloat* data, const snSize& nsz){ size_t nnsz = nsz.size(); ASSERT_MESS(data && (nnsz > 0), ""); if (sz_.size() < nnsz){ if (dataGPU_) cuAssert(cudaFree(dataGPU_)); cuAssert(cudaMalloc(&dataGPU_, nnsz * sizeof(snFloat))); } cuAssert(cudaMemcpy(dataGPU_, data, nnsz * sizeof(snFloat), cudaMemcpyKind::cudaMemcpyHostToDevice)); sz_ = nsz; } snFloat* Tensor::getDataGPU() const{ return dataGPU_; } snFloat* Tensor::getDataCPU() const{ size_t csz = sz_.size(); dataCPU_ = (snFloat*)realloc(dataCPU_, csz * sizeof(snFloat)); cuAssert(cudaMemcpy(dataCPU_, dataGPU_, csz * sizeof(snFloat), cudaMemcpyKind::cudaMemcpyDeviceToHost)); return dataCPU_; } void Tensor::resize(const snSize& nsz){ size_t nnsz = nsz.size(), csz = sz_.size(); ASSERT_MESS(nnsz > 0, ""); if (csz < nnsz){ snFloat* mem = nullptr; cuAssert(cudaMalloc(&mem, nnsz * sizeof(snFloat))); if (dataGPU_){ if (csz > 0) cuAssert(cudaMemcpy(mem, dataGPU_, csz * sizeof(snFloat), cudaMemcpyKind::cudaMemcpyDeviceToDevice)); cuAssert(cudaFree(dataGPU_)); } dataGPU_ = mem; cuAssert(cudaMemset(dataGPU_ + csz, 0, (nnsz - csz) * sizeof(snFloat))); } sz_ = nsz; }
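// The .hip/.cu pair above differs only mechanically: hipify rewrites the CUDA
// runtime calls to their HIP counterparts and prepends the generated-file
// banner, while the Tensor logic itself is untouched. A minimal sketch of the
// renames visible in this pair; roundTrip is an illustrative helper, not part of
// the library.
#include <hip/hip_runtime.h>                           // replaces <cuda_runtime.h>

static hipError_t roundTrip(float* hostBuf, size_t count) {
  float* devBuf = nullptr;
  hipError_t err = hipMalloc(&devBuf, count * sizeof(float));   // cudaMalloc
  if (err != hipSuccess) return err;
  err = hipMemcpy(devBuf, hostBuf, count * sizeof(float),
                  hipMemcpyHostToDevice);              // cudaMemcpyHostToDevice
  if (err == hipSuccess)
    err = hipMemcpy(hostBuf, devBuf, count * sizeof(float),
                    hipMemcpyDeviceToHost);            // cudaMemcpyDeviceToHost
  hipFree(devBuf);                                     // cudaFree
  return err;
}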
6ea2be350544174d4eed1ed9a1c850040be845d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> // includes, project #include "2Dconvolution.h" #include "2Dconvolution_gold.cpp" using namespace std; //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int CompareResults(float* A, float* B, int width, int height, float eps); bool ReadParams(int* params, int size, char* file_name); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification //////////////////////////////////////////////////////////////////////////////// __constant__ float sM[KERNEL_SIZE][KERNEL_SIZE]; __global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P) { // Your code comes here... 
int tx = threadIdx.x; int ty = threadIdx.y; int x = tx + blockIdx.x * blockDim.x; int y = ty + blockIdx.y * blockDim.y; int tid = x + y * N.width; int KR = KERNEL_SIZE/2; int i, j; // Load M into constant memory /*__constant__ float sM[KERNEL_SIZE][KERNEL_SIZE]; if (x < KERNEL_SIZE && y < KERNEL_SIZE) sM[y][x] = M.elements[x + y * M.width];*/ __shared__ float sN[BLOCK_SIZE + 4][BLOCK_SIZE + 4]; // Handle 4 corner cases of P i = x - KR; j = y - KR; if (i < 0 || j < 0) sN[ty][tx] = 0.f; else //sN[tx][ty] = 7.f; sN[ty][tx] = N.elements[tid - KR - KR * N.width]; __syncthreads(); i = x + KR; j = y - KR; if (i > N.width - 1 || j < 0) sN[ty][tx + KR + KR] = 0.f; else //sN[tx + KR + KR][ty] = 7.f; sN[ty][tx + KR + KR] = N.elements[tid + KR - KR * N.width]; __syncthreads(); i = x - KR; j = y + KR; if (i < 0 || j > N.height - 1) sN[ty + KR + KR][tx] = 0.f; else //sN[tx][ty + KR + KR] = 7.f; sN[ty + KR + KR][tx] = N.elements[tid - KR + KR * N.width]; __syncthreads(); i = x + KR; j = y + KR; if (i > N.width - 1 || j > N.height -1) sN[ty + KR + KR][tx + KR + KR] = 0.f; else //sN[tx + KR + KR][ty + KR + KR] = 7.f; sN[ty + KR + KR][tx + KR + KR] = N.elements[tid + KR + KR * N.width]; __syncthreads(); float sum = 0.f; // Convolute for (i = 0; i < KERNEL_SIZE; i++) for (j = 0; j < KERNEL_SIZE; j++) sum += sN[ty + i][tx + j] * sM[i][j]; if (tx < N.width && ty < N.height) P.elements[tid] = sum; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; srand(2013); if (argc == 2) { int in = atoi(argv[1]); // Allocate and initialize the matrices M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1); N = AllocateMatrix(in, in, 1); P = AllocateMatrix(N.height, N.width, 0); } else if(argc != 5 && argc != 4 && argc != 2) { // Allocate and initialize the matrices M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1); N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1); P = AllocateMatrix(N.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = (int*)malloc(2 * sizeof(int)); unsigned int data_read = 2; if(ReadParams(params, data_read, argv[1])){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0); N = AllocateMatrix(params[0], params[1], 0); P = AllocateMatrix(params[0], params[1], 0); (void)ReadFile(&M, argv[2]); (void)ReadFile(&N, argv[3]); } printf("Image size = %d x %d\n", P.width, P.height); // M * N on the device ConvolutionOnDevice(M, N, P); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); float cpu; hipEvent_t cpu_start, cpu_end; hipEventCreate(&cpu_start); hipEventCreate(&cpu_end); hipEventRecord(cpu_start, NULL); //computeGold(reference.elements, M.elements, N.elements, N.height, N.width); hipEventRecord(cpu_end, NULL); hipEventSynchronize(cpu_end); hipEventElapsedTime(&cpu, cpu_start, cpu_end); //printf("CPU time = %f \n", cpu*1000); // in this case check if the result is equivalent to the expected soluion int count = CompareResults(reference.elements, P.elements, P.width , P.height, 0.01f); //printf("Test %s\n", (1 == res) ? 
"PASSED" : "FAILED"); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { // WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); Matrix Nd = AllocateDeviceMatrix(N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); // Setup the execution configuration dim3 grid((P.width + BLOCK_SIZE -1)/BLOCK_SIZE, (P.height + BLOCK_SIZE -1)/BLOCK_SIZE, 1); dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1); float gpu; hipEvent_t gpu_start, gpu_end; hipEventCreate(&gpu_start); hipEventCreate(&gpu_end); hipEventRecord(gpu_start, NULL); //CopyToDeviceMatrix(Md, M); hipMemcpyToSymbol(sM, M.elements, M.width*M.height*sizeof(float)); CopyToDeviceMatrix(Nd, N); CopyToDeviceMatrix(Pd, P); // Clear memory // Launch the device computation threads! hipLaunchKernelGGL(( ConvolutionKernel), dim3(grid), dim3(block), 0, 0, Md, Nd, Pd); // Read P from the device CopyFromDeviceMatrix(P, Pd); hipEventRecord(gpu_end, NULL); hipEventSynchronize(gpu_end); hipEventElapsedTime(&gpu, gpu_start, gpu_end); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); printf("GPU time = %f \n", gpu*1000); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); if(rand() % 2) M.elements[i] = - M.elements[i]; } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } //compare the data stored in two arrays on the host int CompareResults(float* A, float* B, int width, int height, float eps) { int count = 0; for (unsigned int i = 0; i < width*height; i++) { float error = A[i] - B[i]; if(error>eps) count++; } //printf("No. 
of differences = %d\n", count); return count; } bool ReadParams(int* params, int size, char* file_name){ ifstream ifile(file_name); int i=0; for(int i=0; i<size; i++){ if(ifile.fail()==false){ ifile>>params[i]; } } return (i==size)? 1:0; } // Read a 16x16 floating point matrix in from file int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height * M->width; std::ifstream ifile(file_name); for(unsigned int i = 0; i < data_read; i++){ ifile>>M->elements[i]; } ifile.close(); return data_read; } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { std::ofstream ofile(file_name); for(unsigned int i = 0; i < M.width*M.height; i++){ ofile<<M.elements[i]; } ofile.close(); }
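// ConvolutionKernel above stages a (BLOCK_SIZE + 2*KR) x (BLOCK_SIZE + 2*KR)
// input tile in shared memory, where KR = KERNEL_SIZE / 2: every thread loads
// the four elements shifted by (-KR,-KR), (+KR,-KR), (-KR,+KR) and (+KR,+KR),
// and out-of-range reads are zero-padded. Note that the hard-coded "+ 4" in the
// sN declaration assumes KR == 2, i.e. a 5x5 filter. The device helper below is
// a compact sketch of that zero-padded halo load; loadHaloElement is an
// illustrative name, not part of the file.
__device__ inline float loadHaloElement(const float* img, int width, int height,
                                        int x, int y) {
  // Zero padding outside the image, as in the four corner cases above.
  return (x < 0 || y < 0 || x >= width || y >= height) ? 0.f
                                                       : img[x + y * width];
}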
6ea2be350544174d4eed1ed9a1c850040be845d9.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> // includes, project #include "2Dconvolution.h" #include "2Dconvolution_gold.cpp" using namespace std; //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int CompareResults(float* A, float* B, int width, int height, float eps); bool ReadParams(int* params, int size, char* file_name); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification //////////////////////////////////////////////////////////////////////////////// __constant__ float sM[KERNEL_SIZE][KERNEL_SIZE]; __global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P) { // Your code comes here... 
int tx = threadIdx.x; int ty = threadIdx.y; int x = tx + blockIdx.x * blockDim.x; int y = ty + blockIdx.y * blockDim.y; int tid = x + y * N.width; int KR = KERNEL_SIZE/2; int i, j; // Load M into constant memory /*__constant__ float sM[KERNEL_SIZE][KERNEL_SIZE]; if (x < KERNEL_SIZE && y < KERNEL_SIZE) sM[y][x] = M.elements[x + y * M.width];*/ __shared__ float sN[BLOCK_SIZE + 4][BLOCK_SIZE + 4]; // Handle 4 corner cases of P i = x - KR; j = y - KR; if (i < 0 || j < 0) sN[ty][tx] = 0.f; else //sN[tx][ty] = 7.f; sN[ty][tx] = N.elements[tid - KR - KR * N.width]; __syncthreads(); i = x + KR; j = y - KR; if (i > N.width - 1 || j < 0) sN[ty][tx + KR + KR] = 0.f; else //sN[tx + KR + KR][ty] = 7.f; sN[ty][tx + KR + KR] = N.elements[tid + KR - KR * N.width]; __syncthreads(); i = x - KR; j = y + KR; if (i < 0 || j > N.height - 1) sN[ty + KR + KR][tx] = 0.f; else //sN[tx][ty + KR + KR] = 7.f; sN[ty + KR + KR][tx] = N.elements[tid - KR + KR * N.width]; __syncthreads(); i = x + KR; j = y + KR; if (i > N.width - 1 || j > N.height -1) sN[ty + KR + KR][tx + KR + KR] = 0.f; else //sN[tx + KR + KR][ty + KR + KR] = 7.f; sN[ty + KR + KR][tx + KR + KR] = N.elements[tid + KR + KR * N.width]; __syncthreads(); float sum = 0.f; // Convolute for (i = 0; i < KERNEL_SIZE; i++) for (j = 0; j < KERNEL_SIZE; j++) sum += sN[ty + i][tx + j] * sM[i][j]; if (tx < N.width && ty < N.height) P.elements[tid] = sum; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; srand(2013); if (argc == 2) { int in = atoi(argv[1]); // Allocate and initialize the matrices M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1); N = AllocateMatrix(in, in, 1); P = AllocateMatrix(N.height, N.width, 0); } else if(argc != 5 && argc != 4 && argc != 2) { // Allocate and initialize the matrices M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1); N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1); P = AllocateMatrix(N.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = (int*)malloc(2 * sizeof(int)); unsigned int data_read = 2; if(ReadParams(params, data_read, argv[1])){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0); N = AllocateMatrix(params[0], params[1], 0); P = AllocateMatrix(params[0], params[1], 0); (void)ReadFile(&M, argv[2]); (void)ReadFile(&N, argv[3]); } printf("Image size = %d x %d\n", P.width, P.height); // M * N on the device ConvolutionOnDevice(M, N, P); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); float cpu; cudaEvent_t cpu_start, cpu_end; cudaEventCreate(&cpu_start); cudaEventCreate(&cpu_end); cudaEventRecord(cpu_start, NULL); //computeGold(reference.elements, M.elements, N.elements, N.height, N.width); cudaEventRecord(cpu_end, NULL); cudaEventSynchronize(cpu_end); cudaEventElapsedTime(&cpu, cpu_start, cpu_end); //printf("CPU time = %f \n", cpu*1000); // in this case check if the result is equivalent to the expected soluion int count = CompareResults(reference.elements, P.elements, P.width , P.height, 0.01f); //printf("Test %s\n", (1 == res) ? 
"PASSED" : "FAILED"); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { // WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); Matrix Nd = AllocateDeviceMatrix(N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); // Setup the execution configuration dim3 grid((P.width + BLOCK_SIZE -1)/BLOCK_SIZE, (P.height + BLOCK_SIZE -1)/BLOCK_SIZE, 1); dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1); float gpu; cudaEvent_t gpu_start, gpu_end; cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_end); cudaEventRecord(gpu_start, NULL); //CopyToDeviceMatrix(Md, M); cudaMemcpyToSymbol(sM, M.elements, M.width*M.height*sizeof(float)); CopyToDeviceMatrix(Nd, N); CopyToDeviceMatrix(Pd, P); // Clear memory // Launch the device computation threads! ConvolutionKernel<<<grid, block>>>(Md, Nd, Pd); // Read P from the device CopyFromDeviceMatrix(P, Pd); cudaEventRecord(gpu_end, NULL); cudaEventSynchronize(gpu_end); cudaEventElapsedTime(&gpu, gpu_start, gpu_end); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); printf("GPU time = %f \n", gpu*1000); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); if(rand() % 2) M.elements[i] = - M.elements[i]; } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } //compare the data stored in two arrays on the host int CompareResults(float* A, float* B, int width, int height, float eps) { int count = 0; for (unsigned int i = 0; i < width*height; i++) { float error = A[i] - B[i]; if(error>eps) count++; } //printf("No. 
of differences = %d\n", count); return count; } bool ReadParams(int* params, int size, char* file_name){ ifstream ifile(file_name); int i=0; for(int i=0; i<size; i++){ if(ifile.fail()==false){ ifile>>params[i]; } } return (i==size)? 1:0; } // Read a 16x16 floating point matrix in from file int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height * M->width; std::ifstream ifile(file_name); for(unsigned int i = 0; i < data_read; i++){ ifile>>M->elements[i]; } ifile.close(); return data_read; } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { std::ofstream ofile(file_name); for(unsigned int i = 0; i < M.width*M.height; i++){ ofile<<M.elements[i]; } ofile.close(); }
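// In ConvolutionOnDevice above, the filter is uploaded once into the
// __constant__ array sM (it never changes during the launch) and the grid
// rounds the image size up to whole BLOCK_SIZE x BLOCK_SIZE blocks. Because of
// that rounding, edge blocks contain threads whose global (x, y) fall outside
// the image, so the final write into P needs a guard on those global
// coordinates. The wrapper below is a minimal sketch of the upload and launch
// configuration; launchConvolution is an illustrative name, with Mhost the host
// filter and Md, Nd, Pd device matrices as in the file.
static void launchConvolution(const Matrix& Mhost, const Matrix& Md,
                              const Matrix& Nd, Matrix& Pd) {
  // Filter -> constant memory; sM is the __constant__ array declared above.
  cudaMemcpyToSymbol(sM, Mhost.elements,
                     KERNEL_SIZE * KERNEL_SIZE * sizeof(float));
  dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
  dim3 grid((Pd.width  + BLOCK_SIZE - 1) / BLOCK_SIZE,   // ceil-divide width
            (Pd.height + BLOCK_SIZE - 1) / BLOCK_SIZE,   // ceil-divide height
            1);
  ConvolutionKernel<<<grid, block>>>(Md, Nd, Pd);        // M is read via sM only
}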
a65c8f5740bc6161ad9bd50c77dee4d1231ede40.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "weight_vector_bound_cuda_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <hip/hip_runtime.h> #include <boost/format.hpp> #include <stack> #include "../debug_util.h" #include <boost/filesystem.hpp> namespace nnforge { namespace cuda { __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } __global__ void dropout_kernel( float * __restrict neurons, const float * __restrict random_buf, float dropout_rate, int offset, unsigned int mask, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { unsigned int random_elem_id = (elem_id + offset) & mask; if (random_buf[random_elem_id] < dropout_rate) neurons[elem_id] = 0.0F; } } unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, const std::map<unsigned int, float>& layer_to_dropout_rate_map, const std::map<unsigned int, weight_vector_bound>& layer_to_weight_vector_bound_map, float weight_decay, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef, layer_to_dropout_rate_map, layer_to_weight_vector_bound_map, weight_decay) , cuda_config(cuda_config) { const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); for(std::map<unsigned int, weight_vector_bound>::const_iterator it = 
this->layer_to_weight_vector_bound_map.begin(); it != this->layer_to_weight_vector_bound_map.end(); ++it) { unsigned int layer_id = it->first; if (layer_id < testing_layer_count) throw neural_network_exception((boost::format("Weight vector bound is specified for layer %1% while it is in testing part (consisting of %2% layers) of the updater") % layer_id % testing_layer_count).str()); weight_vector_bounds.insert(std::make_pair(layer_id, single_weight_vector_bound_factory::get_const_instance().create_weight_vector_bound(layer_list[layer_id], cuda_config))); } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } std::vector<testing_result_smart_ptr> network_updater_cuda::actual_update( supervised_data_reader& reader, const std::vector<network_data_smart_ptr>& learning_rate_vector_list, std::vector<network_data_smart_ptr>& data_list) { std::vector<testing_result_smart_ptr> res; reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); unsigned int updater_entry_count = static_cast<unsigned int>(data_list.size()); if (updater_entry_count == 0) return res; for(unsigned int i = 0; i < learning_rate_vector_list.size(); ++i) res.push_back(testing_result_smart_ptr(new testing_result(ef))); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = get_data(data_list); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > learning_rate_data = get_learning_rate(learning_rate_vector_list); buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double) * 
updater_entry_count); // error buffer if (!random_uniform_list.empty()) buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = learning_rate_data.begin(); it != learning_rate_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = std::min<unsigned int>(std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch); cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(updater_entry_count * sizeof(double))); cuda_linear_buffer_device_smart_ptr random_uniform_buf; if (!random_uniform_list.empty()) { random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float))); cuda_safe_call(hipMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), hipMemcpyHostToDevice, *command_stream)); } cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } 
std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } std::map<unsigned int, std::vector<cuda_linear_buffer_device_smart_ptr> > weight_vector_bound_buffers; for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) weight_vector_bound_buffers.insert(std::make_pair(it->first, it->second->allocate_additional_buffers(max_entry_count))); cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float))); float * output = *output_host_buf; // zero mse cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, updater_entry_count, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } random_generator gen = rnd::get_random_generator(); nnforge_uniform_int_distribution<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1)); unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1); unsigned int entries_processed_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(max_entry_count, entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), *data_stream); async_reader.start(); } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( convert_compacted_to_raw_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream, *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(hipMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), hipMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type 
%1%") % type_code).str()); // Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, input_and_additional_buffers_pack_it->first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_it->get_neuron_count(), offset); } (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } // Apply dropout to the input of the first updater layer { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, updater_input_and_all_buffers_pack[0].first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(), offset); } } for(unsigned int input_entry_id = 0; input_entry_id < entries_available_for_processing_count; ++input_entry_id) { std::stack<unsigned int> offset_list; // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_id, ++layer_config_it) { if (it != updater_list.begin()) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); offset_list.push(offset); enqueue_dropout( *command_stream, random_uniform_buf, input_and_all_buffers_pack_it->first, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_test( it == updater_list.begin() ? 
input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); } } // Compute errors { ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, input_entry_id, output_neuron_count, updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator learning_rate_data_it = learning_rate_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1; layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1; std::vector<std::vector<unsigned int> >::reverse_iterator incoming_weight_count_it = incoming_weight_count_per_output_neuron_list_list.rbegin(); for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++learning_rate_data_it, ++output_errors_it, ++net_data_it, --reverse_layer_id, ++layer_config_it, ++incoming_weight_count_it) { if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); /* { cuda_linear_buffer_device_smart_ptr buf = (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%.txt") % reverse_layer_id).str()).string().c_str()); } */ std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = offset_list.top(); offset_list.pop(); enqueue_dropout( *command_stream, random_uniform_buf, (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? 
*output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? input_entry_id : 0, *command_stream, *net_data_it, *schema_data_it, *learning_rate_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count, weight_decay); weight_vector_bound_map::iterator bound_it = weight_vector_bounds.find(reverse_layer_id); if (bound_it != weight_vector_bounds.end()) { const weight_vector_bound& bound = layer_to_weight_vector_bound_map.find(reverse_layer_id)->second; const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers = weight_vector_bound_buffers.find(reverse_layer_id)->second; bound_it->second->enqueue_normalize_weights( *command_stream, bound, *net_data_it, additional_buffers, updater_entry_count, *incoming_weight_count_it); } } } if (((input_entry_id % 16) == 1) && cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // for(unsigned int input_entry_id entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(hipStreamSynchronize(*data_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } read_data(net_data, data_list, *command_stream); std::vector<double> error_list(updater_entry_count); cuda_safe_call(hipMemcpyAsync(&(*error_list.begin()), *error_buf, error_list.size() * sizeof(double), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); for(unsigned int i = 0; i < updater_entry_count; ++i) res[i]->init(error_list[i], entries_processed_count); return res; } void network_updater_cuda::layer_config_list_modified() { layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); incoming_weight_count_per_output_neuron_list_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count), (it_conf > layer_config_list.begin() + testing_layer_count))); incoming_weight_count_per_output_neuron_list_list.push_back(updater_list.back()->get_incoming_weight_count_per_output_neuron_list()); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_data(const std::vector<network_data_smart_ptr>& data) const { 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<layer_data_smart_ptr> data_list; for(int j = 0; j < data.size(); ++j) data_list.push_back(data[j]->at(i + testing_layer_count)); std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_data(data_list); res.push_back(device_data); } return res; } std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_learning_rate(const std::vector<network_data_smart_ptr>& learning_rate) const { std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<const_layer_data_smart_ptr> data_list; for(int j = 0; j < learning_rate.size(); ++j) data_list.push_back(learning_rate[j]->at(i + testing_layer_count)); std::vector<const_cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_learning_rate(data_list); res.push_back(device_data); } return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, std::vector<network_data_smart_ptr>& res, hipStream_t stream_id) const { const network_data_smart_ptr& first_data = res.front(); unsigned int layer_id = 0; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) { std::vector<layer_data_smart_ptr> host_data_list; for(std::vector<network_data_smart_ptr>::const_iterator sample_it = res.begin(); sample_it != res.end(); sample_it++) host_data_list.push_back((*sample_it)->at(layer_id + testing_layer_count)); updater_list[layer_id]->get_data_from_device(*src_it, host_data_list); } } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration, updater_entry_count); } unsigned int network_updater_cuda::get_max_batch_size() const { buffer_cuda_size_configuration buffer_configuration; for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::map<unsigned int, 
weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration); return cuda_config->get_max_entry_count(buffer_configuration, 0.5F); } void network_updater_cuda::enqueue_dropout( hipStream_t stream_id, const_cuda_linear_buffer_device_smart_ptr random_buffer, cuda_linear_buffer_device_smart_ptr target_buffer, float dropout_rate, unsigned int mask, unsigned int elem_count, unsigned int offset_in_random_list) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( dropout_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *target_buffer, *random_buffer, dropout_rate, offset_in_random_list, mask, elem_count); } } }
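The .hip file above ends with the hipLaunchKernelGGL form of the dropout launch, while the original .cu file below keeps the triple-chevron syntax. The following sketch is not part of either file (the kernel name, sizes, and arguments are made up); it only illustrates the one-to-one mapping hipify applies, with grid, block, shared-memory size, and stream preserved in order ahead of the kernel arguments.

#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

int main()
{
    const int n = 1024;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA source form (as in the .cu file below):
    scale_kernel<<<grid, block, 0, 0>>>(d_data, 2.0f, n);

    // Hipified form (as emitted in the .hip file above), same arguments after the stream:
    // hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_data, 2.0f, n);

    cudaDeviceSynchronize();
    cudaFree(d_data);
    return 0;
}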
a65c8f5740bc6161ad9bd50c77dee4d1231ede40.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "weight_vector_bound_cuda_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <cuda_runtime.h> #include <boost/format.hpp> #include <stack> #include "../debug_util.h" #include <boost/filesystem.hpp> namespace nnforge { namespace cuda { __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } __global__ void dropout_kernel( float * __restrict neurons, const float * __restrict random_buf, float dropout_rate, int offset, unsigned int mask, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { unsigned int random_elem_id = (elem_id + offset) & mask; if (random_buf[random_elem_id] < dropout_rate) neurons[elem_id] = 0.0F; } } unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, const std::map<unsigned int, float>& layer_to_dropout_rate_map, const std::map<unsigned int, weight_vector_bound>& layer_to_weight_vector_bound_map, float weight_decay, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef, layer_to_dropout_rate_map, layer_to_weight_vector_bound_map, weight_decay) , cuda_config(cuda_config) { const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); for(std::map<unsigned int, weight_vector_bound>::const_iterator it = this->layer_to_weight_vector_bound_map.begin(); it != 
this->layer_to_weight_vector_bound_map.end(); ++it) { unsigned int layer_id = it->first; if (layer_id < testing_layer_count) throw neural_network_exception((boost::format("Weight vector bound is specified for layer %1% while it is in testing part (consisting of %2% layers) of the updater") % layer_id % testing_layer_count).str()); weight_vector_bounds.insert(std::make_pair(layer_id, single_weight_vector_bound_factory::get_const_instance().create_weight_vector_bound(layer_list[layer_id], cuda_config))); } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } std::vector<testing_result_smart_ptr> network_updater_cuda::actual_update( supervised_data_reader& reader, const std::vector<network_data_smart_ptr>& learning_rate_vector_list, std::vector<network_data_smart_ptr>& data_list) { std::vector<testing_result_smart_ptr> res; reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); unsigned int updater_entry_count = static_cast<unsigned int>(data_list.size()); if (updater_entry_count == 0) return res; for(unsigned int i = 0; i < learning_rate_vector_list.size(); ++i) res.push_back(testing_result_smart_ptr(new testing_result(ef))); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = get_data(data_list); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > learning_rate_data = get_learning_rate(learning_rate_vector_list); buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double) * updater_entry_count); // error buffer if 
(!random_uniform_list.empty()) buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = learning_rate_data.begin(); it != learning_rate_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = std::min<unsigned int>(std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch); cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(updater_entry_count * sizeof(double))); cuda_linear_buffer_device_smart_ptr random_uniform_buf; if (!random_uniform_list.empty()) { random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float))); cuda_safe_call(cudaMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), cudaMemcpyHostToDevice, *command_stream)); } cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } 
std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } std::map<unsigned int, std::vector<cuda_linear_buffer_device_smart_ptr> > weight_vector_bound_buffers; for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) weight_vector_bound_buffers.insert(std::make_pair(it->first, it->second->allocate_additional_buffers(max_entry_count))); cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float))); float * output = *output_host_buf; // zero mse cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, updater_entry_count, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } random_generator gen = rnd::get_random_generator(); nnforge_uniform_int_distribution<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1)); unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1); unsigned int entries_processed_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(max_entry_count, entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), *data_stream); async_reader.start(); } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); convert_compacted_to_raw_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>( *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(cudaMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), cudaMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str()); 
// Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, input_and_additional_buffers_pack_it->first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_it->get_neuron_count(), offset); } (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } // Apply dropout to the input of the first updater layer { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, updater_input_and_all_buffers_pack[0].first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(), offset); } } for(unsigned int input_entry_id = 0; input_entry_id < entries_available_for_processing_count; ++input_entry_id) { std::stack<unsigned int> offset_list; // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_id, ++layer_config_it) { if (it != updater_list.begin()) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); offset_list.push(offset); enqueue_dropout( *command_stream, random_uniform_buf, input_and_all_buffers_pack_it->first, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_test( it == updater_list.begin() ? 
input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); } } // Compute errors { ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, input_entry_id, output_neuron_count, updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator learning_rate_data_it = learning_rate_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1; layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1; std::vector<std::vector<unsigned int> >::reverse_iterator incoming_weight_count_it = incoming_weight_count_per_output_neuron_list_list.rbegin(); for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++learning_rate_data_it, ++output_errors_it, ++net_data_it, --reverse_layer_id, ++layer_config_it, ++incoming_weight_count_it) { if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); /* { cuda_linear_buffer_device_smart_ptr buf = (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer; std::vector<float> inp_err(buf->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*inp_err.begin()), *buf, inp_err.size() * sizeof(float), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); boost::filesystem::path dir = "Debug"; dir /= "GPU"; boost::filesystem::create_directories(dir); debug_util::dump_list( &(*inp_err.begin()), inp_err.size(), (dir / (boost::format("input_errors_%1%.txt") % reverse_layer_id).str()).string().c_str()); } */ std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = offset_list.top(); offset_list.pop(); enqueue_dropout( *command_stream, random_uniform_buf, (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? 
*output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? input_entry_id : 0, *command_stream, *net_data_it, *schema_data_it, *learning_rate_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count, weight_decay); weight_vector_bound_map::iterator bound_it = weight_vector_bounds.find(reverse_layer_id); if (bound_it != weight_vector_bounds.end()) { const weight_vector_bound& bound = layer_to_weight_vector_bound_map.find(reverse_layer_id)->second; const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers = weight_vector_bound_buffers.find(reverse_layer_id)->second; bound_it->second->enqueue_normalize_weights( *command_stream, bound, *net_data_it, additional_buffers, updater_entry_count, *incoming_weight_count_it); } } } if (((input_entry_id % 16) == 1) && cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // for(unsigned int input_entry_id entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(cudaStreamSynchronize(*data_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } read_data(net_data, data_list, *command_stream); std::vector<double> error_list(updater_entry_count); cuda_safe_call(cudaMemcpyAsync(&(*error_list.begin()), *error_buf, error_list.size() * sizeof(double), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); for(unsigned int i = 0; i < updater_entry_count; ++i) res[i]->init(error_list[i], entries_processed_count); return res; } void network_updater_cuda::layer_config_list_modified() { layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); incoming_weight_count_per_output_neuron_list_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count), (it_conf > layer_config_list.begin() + testing_layer_count))); incoming_weight_count_per_output_neuron_list_list.push_back(updater_list.back()->get_incoming_weight_count_per_output_neuron_list()); } } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_data(const std::vector<network_data_smart_ptr>& data) const { 
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<layer_data_smart_ptr> data_list; for(int j = 0; j < data.size(); ++j) data_list.push_back(data[j]->at(i + testing_layer_count)); std::vector<cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_data(data_list); res.push_back(device_data); } return res; } std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::get_learning_rate(const std::vector<network_data_smart_ptr>& learning_rate) const { std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res; for(int i = 0; i < updater_list.size(); ++i) { std::vector<const_layer_data_smart_ptr> data_list; for(int j = 0; j < learning_rate.size(); ++j) data_list.push_back(learning_rate[j]->at(i + testing_layer_count)); std::vector<const_cuda_linear_buffer_device_smart_ptr> device_data = updater_list[i]->get_learning_rate(data_list); res.push_back(device_data); } return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, std::vector<network_data_smart_ptr>& res, cudaStream_t stream_id) const { const network_data_smart_ptr& first_data = res.front(); unsigned int layer_id = 0; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) { std::vector<layer_data_smart_ptr> host_data_list; for(std::vector<network_data_smart_ptr>::const_iterator sample_it = res.begin(); sample_it != res.end(); sample_it++) host_data_list.push_back((*sample_it)->at(layer_id + testing_layer_count)); updater_list[layer_id]->get_data_from_device(*src_it, host_data_list); } } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration, updater_entry_count); } unsigned int network_updater_cuda::get_max_batch_size() const { buffer_cuda_size_configuration buffer_configuration; for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::map<unsigned int, 
weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration); return cuda_config->get_max_entry_count(buffer_configuration, 0.5F); } void network_updater_cuda::enqueue_dropout( cudaStream_t stream_id, const_cuda_linear_buffer_device_smart_ptr random_buffer, cuda_linear_buffer_device_smart_ptr target_buffer, float dropout_rate, unsigned int mask, unsigned int elem_count, unsigned int offset_in_random_list) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); dropout_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *target_buffer, *random_buffer, dropout_rate, offset_in_random_list, mask, elem_count); } } }
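One detail worth noting in the updater above: the dropout kernel wraps its index into the random buffer with (elem_id + offset) & mask, where mask is random_uniform_list.size() - 1. That bitwise AND is equivalent to a modulo only when the buffer length is a power of two. The host-side sketch below (the sizes and indices are made-up values, not taken from the file) just demonstrates the invariant.

#include <cassert>
#include <cstdio>

int main()
{
    unsigned int size = 1u << 16;        // hypothetical random buffer length
    unsigned int mask = size - 1;        // same construction as in actual_update

    // The wrap trick only behaves like (elem_id + offset) % size when size is 2^k.
    assert((size & (size - 1)) == 0);

    unsigned int elem_id = 123456u;
    unsigned int offset = 789u;
    unsigned int wrapped = (elem_id + offset) & mask;
    printf("wrapped index %u is always below %u\n", wrapped, size);
    return 0;
}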
675d88221ed0bdd20e90ee508603982190761b4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip_helpers.cuh" #include "stdafx.h" #include <iostream> #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/scatter.h> #include <thrust/gather.h> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/execution_policy.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <algorithm> #define MARK_VAL -1 template<typename Left, typename Right> __global__ void join_partitions(const Left *left, const int *leftStartIndexes, const int *leftPartitionSizes, const Right *right, const int *rightStartIndexes, const int *rightPartitionSizes, thrust::tuple<Left, Right> *result, size_t partitionCount, const int *resultStartIndexes) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < partitionCount) { int leftOffset = leftStartIndexes[idx]; int rightOffset = rightStartIndexes[idx]; int offset = resultStartIndexes[idx]; for (int leftIndex = 0; leftIndex < leftPartitionSizes[idx]; leftIndex++) { for (int rightIndex = 0; rightIndex < rightPartitionSizes[idx]; rightIndex++) { result[offset] = thrust::make_tuple(left[leftOffset + leftIndex], right[rightOffset + rightIndex]); offset++; } } } } template<typename Input> struct order_key_selector : public thrust::unary_function<Input, int> { __host__ __device__ int operator()(const Input& input) const { return input.order_key; } }; struct mark_multiply_func { template <typename T1, typename T2> __host__ __device__ int operator()(T1 &z1, T2 &z2){ int res = MARK_VAL; if (thrust::get<0>(z1) == thrust::get<0>(z2)){ res = thrust::get<1>(z1) * thrust::get<1>(z2); } return res; } }; struct mark_test_func { template <typename T> __host__ __device__ bool operator()(T t){ if (thrust::get<1>(t) == MARK_VAL) return true; return false; } }; template<typename Left, typename Right> std::vector<std::tuple<Left, Right>>& sort_merge_join(std::vector<Left>& h_leftItems, std::vector<Right>& h_rightItems) { std::clock_t h_start = std::clock(); // Copy host data to the device thrust::device_vector<Left> d_leftItems(h_leftItems); thrust::device_vector<Right> d_rightItems(h_rightItems); // Allocate space for the row keys on the device thrust::device_vector<int> d_leftKeys(h_leftItems.size()); thrust::device_vector<int> d_rightKeys(h_rightItems.size()); //std::cout << "Copying input and allocating space took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); //std::clock_t h_totalStart = std::clock(); // Create device vectors containing the keys for the join operation order_key_selector<Left> leftOperator; order_key_selector<Right> rightOperator; thrust::transform(d_leftItems.begin(), d_leftItems.end(), d_leftKeys.begin(), leftOperator); thrust::transform(d_rightItems.begin(), d_rightItems.end(), d_rightKeys.begin(), rightOperator); //std::cout << "Selecting join keys took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); // Sort the data using the keys (used for partitioning the data) thrust::sort_by_key(d_leftKeys.begin(), d_leftKeys.end(), d_leftItems.begin()); thrust::sort_by_key(d_rightKeys.begin(), d_rightKeys.end(), d_rightItems.begin()); //std::cout << "Sorting data took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); // Allocate space for the parition keys and sizes thrust::device_vector<int> d_leftCountKeys(h_leftItems.size()); thrust::device_vector<int> 
d_rightCountKeys(h_rightItems.size()); thrust::device_vector<int> d_leftCounts(h_leftItems.size()); thrust::device_vector<int> d_rightCounts(h_rightItems.size()); //std::cout << "Allocating space for partition keys and values took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); // Calculate the partition keys and sizes auto h_newLeftEnd = thrust::reduce_by_key(d_leftKeys.begin(), d_leftKeys.end(), thrust::make_constant_iterator(1), d_leftCountKeys.begin(), d_leftCounts.begin()); auto h_newRightEnd = thrust::reduce_by_key(d_rightKeys.begin(), d_rightKeys.end(), thrust::make_constant_iterator(1), d_rightCountKeys.begin(), d_rightCounts.begin()); //std::cout << "Calculating partition keys and sizes took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); int64_t h_leftCount = h_newLeftEnd.first - d_leftCountKeys.begin(); int64_t h_rightCount = h_newRightEnd.first - d_rightCountKeys.begin(); // Calculate partition start indexes // Based on http://stackoverflow.com/a/34371396/2041231 thrust::device_vector<int> d_mergedKeys(h_leftCount + h_rightCount); thrust::device_vector<int> d_mergedValues(h_leftCount + h_rightCount); thrust::device_vector<int> d_startIndexes(h_leftCount + h_rightCount - 1); // Create list with keys and values for both the left and right side thrust::merge_by_key(d_leftCountKeys.begin(), d_leftCountKeys.begin() + h_leftCount, d_rightCountKeys.begin(), d_rightCountKeys.begin() + h_rightCount, d_leftCounts.begin(), d_rightCounts.begin(), d_mergedKeys.begin(), d_mergedValues.begin()); // Compute multiplications of each pair of elements for which the key matches (=partition sizes) thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin(), d_mergedValues.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.end() - 1, d_mergedValues.end() - 1)), thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin() + 1, d_mergedValues.begin() + 1)), d_startIndexes.begin(), mark_multiply_func()); // Remove elements for which the key does not match size_t h_filteredResultSize = thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin(), d_startIndexes.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.end() - 1, d_startIndexes.end())), mark_test_func()) - thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin(), d_startIndexes.begin())); d_startIndexes.resize(h_filteredResultSize); // Compute the prefix sum to get the start indexes from the partition sizes thrust::exclusive_scan(d_startIndexes.begin(), d_startIndexes.begin() + h_filteredResultSize, d_startIndexes.begin()); //std::cout << "Calculating partition start indexes took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); thrust::device_vector<int> d_leftStartIndexes(h_leftCount); thrust::device_vector<int> d_rightStartIndexes(h_rightCount); thrust::exclusive_scan(d_leftCounts.begin(), d_leftCounts.begin() + h_leftCount, d_leftStartIndexes.begin()); thrust::exclusive_scan(d_rightCounts.begin(), d_rightCounts.begin() + h_rightCount, d_rightStartIndexes.begin()); //std::cout << "Calculating join block start indexes took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); unsigned int h_partitionCount = (unsigned int)d_startIndexes.size(); int h_joinResultSize = 0; if (h_partitionCount > 0) { h_joinResultSize = *(d_startIndexes.end() - 1) + *(d_leftCounts.end() - 1) * *(d_rightCounts.end() - 1) + 1; } thrust::device_vector<thrust::tuple<Left, Right>> d_joinResult(h_joinResultSize); 
unsigned int h_blockSize = 256; unsigned int h_numBlocks = (h_partitionCount + (h_blockSize - 1)) / h_blockSize; double h_preTime = GetElapsedTime(h_start); #if DEBUG std::cout << "Join preparation took " << h_preTime << "ms\n"; #endif h_start = std::clock(); hipLaunchKernelGGL(( join_partitions), dim3(h_numBlocks), dim3(h_blockSize), 0, 0, thrust::raw_pointer_cast(d_leftItems.data()), thrust::raw_pointer_cast(d_leftStartIndexes.data()), thrust::raw_pointer_cast(d_leftCounts.data()), thrust::raw_pointer_cast(d_rightItems.data()), thrust::raw_pointer_cast(d_rightStartIndexes.data()), thrust::raw_pointer_cast(d_rightCounts.data()), thrust::raw_pointer_cast(d_joinResult.data()), h_partitionCount, thrust::raw_pointer_cast(d_startIndexes.data())); // hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. handleCudaError(hipDeviceSynchronize()); double h_computeTime = GetElapsedTime(h_start); #if DEBUG std::cout << "Calculating join result took " << h_computeTime << "ms\n"; #endif //std::cout << "Total join calculations took " << GetElapsedTime(h_totalStart) << "ms\n"; h_start = std::clock(); thrust::host_vector<thrust::tuple<Left, Right>> h_thrustResult = d_joinResult; std::vector<std::tuple<Left, Right>> &h_result = *new std::vector<std::tuple<Left, Right>>(h_thrustResult.size()); for (size_t i = 0; i < h_thrustResult.size(); i++) { h_result[i] = std::make_tuple(thrust::get<0>(h_thrustResult[i]), thrust::get<1>(h_thrustResult[i])); } double h_postTime = GetElapsedTime(h_start); std::cout << h_preTime << " " << h_computeTime << " " << h_postTime; #if DEBUG std::cout << "Copying results to host took " << h_postTime << "ms\n"; #endif return h_result; } template std::vector<std::tuple<Order, LineItem>>& sort_merge_join<Order, LineItem>(std::vector<Order>& orders, std::vector<LineItem>& items);
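The hipified join above sizes each output partition with an adjacent-pair trick: after merge_by_key interleaves the left and right (key, count) lists, each element is compared with its successor, and a product of the two counts is kept only where the keys match. The sketch below uses toy data and hypothetical names (pair_product stands in for mark_multiply_func) to show the same zip_iterator pattern in isolation.

#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/remove.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <cstdio>

struct pair_product
{
    template <typename T1, typename T2>
    __host__ __device__ int operator()(const T1& a, const T2& b) const
    {
        // -1 marks adjacent entries whose keys differ (key not present on both sides).
        return thrust::get<0>(a) == thrust::get<0>(b)
                   ? thrust::get<1>(a) * thrust::get<1>(b)
                   : -1;
    }
};

int main()
{
    // Merged (key, count) pairs: key 2 appears on both sides, with counts 3 and 2.
    int h_keys[]   = {1, 2, 2, 4};
    int h_counts[] = {2, 3, 2, 5};
    thrust::device_vector<int> keys(h_keys, h_keys + 4);
    thrust::device_vector<int> counts(h_counts, h_counts + 4);

    thrust::device_vector<int> sizes(keys.size() - 1);
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(keys.begin(), counts.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(keys.end() - 1, counts.end() - 1)),
        thrust::make_zip_iterator(thrust::make_tuple(keys.begin() + 1, counts.begin() + 1)),
        sizes.begin(), pair_product());

    // Drop the -1 markers; what remains is one partition size per matching key.
    thrust::device_vector<int>::iterator new_end = thrust::remove(sizes.begin(), sizes.end(), -1);
    for (thrust::device_vector<int>::iterator it = sizes.begin(); it != new_end; ++it)
        printf("partition size: %d\n", (int)*it);   // prints 6 (= 3 * 2)
    return 0;
}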
675d88221ed0bdd20e90ee508603982190761b4c.cu
#include "cuda_helpers.cuh" #include "stdafx.h" #include <iostream> #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/scatter.h> #include <thrust/gather.h> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/execution_policy.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <algorithm> #define MARK_VAL -1 template<typename Left, typename Right> __global__ void join_partitions(const Left *left, const int *leftStartIndexes, const int *leftPartitionSizes, const Right *right, const int *rightStartIndexes, const int *rightPartitionSizes, thrust::tuple<Left, Right> *result, size_t partitionCount, const int *resultStartIndexes) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < partitionCount) { int leftOffset = leftStartIndexes[idx]; int rightOffset = rightStartIndexes[idx]; int offset = resultStartIndexes[idx]; for (int leftIndex = 0; leftIndex < leftPartitionSizes[idx]; leftIndex++) { for (int rightIndex = 0; rightIndex < rightPartitionSizes[idx]; rightIndex++) { result[offset] = thrust::make_tuple(left[leftOffset + leftIndex], right[rightOffset + rightIndex]); offset++; } } } } template<typename Input> struct order_key_selector : public thrust::unary_function<Input, int> { __host__ __device__ int operator()(const Input& input) const { return input.order_key; } }; struct mark_multiply_func { template <typename T1, typename T2> __host__ __device__ int operator()(T1 &z1, T2 &z2){ int res = MARK_VAL; if (thrust::get<0>(z1) == thrust::get<0>(z2)){ res = thrust::get<1>(z1) * thrust::get<1>(z2); } return res; } }; struct mark_test_func { template <typename T> __host__ __device__ bool operator()(T t){ if (thrust::get<1>(t) == MARK_VAL) return true; return false; } }; template<typename Left, typename Right> std::vector<std::tuple<Left, Right>>& sort_merge_join(std::vector<Left>& h_leftItems, std::vector<Right>& h_rightItems) { std::clock_t h_start = std::clock(); // Copy host data to the device thrust::device_vector<Left> d_leftItems(h_leftItems); thrust::device_vector<Right> d_rightItems(h_rightItems); // Allocate space for the row keys on the device thrust::device_vector<int> d_leftKeys(h_leftItems.size()); thrust::device_vector<int> d_rightKeys(h_rightItems.size()); //std::cout << "Copying input and allocating space took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); //std::clock_t h_totalStart = std::clock(); // Create device vectors containing the keys for the join operation order_key_selector<Left> leftOperator; order_key_selector<Right> rightOperator; thrust::transform(d_leftItems.begin(), d_leftItems.end(), d_leftKeys.begin(), leftOperator); thrust::transform(d_rightItems.begin(), d_rightItems.end(), d_rightKeys.begin(), rightOperator); //std::cout << "Selecting join keys took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); // Sort the data using the keys (used for partitioning the data) thrust::sort_by_key(d_leftKeys.begin(), d_leftKeys.end(), d_leftItems.begin()); thrust::sort_by_key(d_rightKeys.begin(), d_rightKeys.end(), d_rightItems.begin()); //std::cout << "Sorting data took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); // Allocate space for the parition keys and sizes thrust::device_vector<int> d_leftCountKeys(h_leftItems.size()); thrust::device_vector<int> d_rightCountKeys(h_rightItems.size()); thrust::device_vector<int> d_leftCounts(h_leftItems.size()); 
thrust::device_vector<int> d_rightCounts(h_rightItems.size()); //std::cout << "Allocating space for partition keys and values took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); // Calculate the partition keys and sizes auto h_newLeftEnd = thrust::reduce_by_key(d_leftKeys.begin(), d_leftKeys.end(), thrust::make_constant_iterator(1), d_leftCountKeys.begin(), d_leftCounts.begin()); auto h_newRightEnd = thrust::reduce_by_key(d_rightKeys.begin(), d_rightKeys.end(), thrust::make_constant_iterator(1), d_rightCountKeys.begin(), d_rightCounts.begin()); //std::cout << "Calculating partition keys and sizes took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); int64_t h_leftCount = h_newLeftEnd.first - d_leftCountKeys.begin(); int64_t h_rightCount = h_newRightEnd.first - d_rightCountKeys.begin(); // Calculate partition start indexes // Based on http://stackoverflow.com/a/34371396/2041231 thrust::device_vector<int> d_mergedKeys(h_leftCount + h_rightCount); thrust::device_vector<int> d_mergedValues(h_leftCount + h_rightCount); thrust::device_vector<int> d_startIndexes(h_leftCount + h_rightCount - 1); // Create list with keys and values for both the left and right side thrust::merge_by_key(d_leftCountKeys.begin(), d_leftCountKeys.begin() + h_leftCount, d_rightCountKeys.begin(), d_rightCountKeys.begin() + h_rightCount, d_leftCounts.begin(), d_rightCounts.begin(), d_mergedKeys.begin(), d_mergedValues.begin()); // Compute multiplications of each pair of elements for which the key matches (=partition sizes) thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin(), d_mergedValues.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.end() - 1, d_mergedValues.end() - 1)), thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin() + 1, d_mergedValues.begin() + 1)), d_startIndexes.begin(), mark_multiply_func()); // Remove elements for which the key does not match size_t h_filteredResultSize = thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin(), d_startIndexes.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.end() - 1, d_startIndexes.end())), mark_test_func()) - thrust::make_zip_iterator(thrust::make_tuple(d_mergedKeys.begin(), d_startIndexes.begin())); d_startIndexes.resize(h_filteredResultSize); // Compute the prefix sum to get the start indexes from the partition sizes thrust::exclusive_scan(d_startIndexes.begin(), d_startIndexes.begin() + h_filteredResultSize, d_startIndexes.begin()); //std::cout << "Calculating partition start indexes took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); thrust::device_vector<int> d_leftStartIndexes(h_leftCount); thrust::device_vector<int> d_rightStartIndexes(h_rightCount); thrust::exclusive_scan(d_leftCounts.begin(), d_leftCounts.begin() + h_leftCount, d_leftStartIndexes.begin()); thrust::exclusive_scan(d_rightCounts.begin(), d_rightCounts.begin() + h_rightCount, d_rightStartIndexes.begin()); //std::cout << "Calculating join block start indexes took " << GetElapsedTime(h_start) << "ms\n"; //h_start = std::clock(); unsigned int h_partitionCount = (unsigned int)d_startIndexes.size(); int h_joinResultSize = 0; if (h_partitionCount > 0) { h_joinResultSize = *(d_startIndexes.end() - 1) + *(d_leftCounts.end() - 1) * *(d_rightCounts.end() - 1) + 1; } thrust::device_vector<thrust::tuple<Left, Right>> d_joinResult(h_joinResultSize); unsigned int h_blockSize = 256; unsigned int h_numBlocks = (h_partitionCount + (h_blockSize - 1)) / 
h_blockSize; double h_preTime = GetElapsedTime(h_start); #if DEBUG std::cout << "Join preparation took " << h_preTime << "ms\n"; #endif h_start = std::clock(); join_partitions<<<h_numBlocks, h_blockSize>>>(thrust::raw_pointer_cast(d_leftItems.data()), thrust::raw_pointer_cast(d_leftStartIndexes.data()), thrust::raw_pointer_cast(d_leftCounts.data()), thrust::raw_pointer_cast(d_rightItems.data()), thrust::raw_pointer_cast(d_rightStartIndexes.data()), thrust::raw_pointer_cast(d_rightCounts.data()), thrust::raw_pointer_cast(d_joinResult.data()), h_partitionCount, thrust::raw_pointer_cast(d_startIndexes.data())); // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. handleCudaError(cudaDeviceSynchronize()); double h_computeTime = GetElapsedTime(h_start); #if DEBUG std::cout << "Calculating join result took " << h_computeTime << "ms\n"; #endif //std::cout << "Total join calculations took " << GetElapsedTime(h_totalStart) << "ms\n"; h_start = std::clock(); thrust::host_vector<thrust::tuple<Left, Right>> h_thrustResult = d_joinResult; std::vector<std::tuple<Left, Right>> &h_result = *new std::vector<std::tuple<Left, Right>>(h_thrustResult.size()); for (size_t i = 0; i < h_thrustResult.size(); i++) { h_result[i] = std::make_tuple(thrust::get<0>(h_thrustResult[i]), thrust::get<1>(h_thrustResult[i])); } double h_postTime = GetElapsedTime(h_start); std::cout << h_preTime << " " << h_computeTime << " " << h_postTime; #if DEBUG std::cout << "Copying results to host took " << h_postTime << "ms\n"; #endif return h_result; } template std::vector<std::tuple<Order, LineItem>>& sort_merge_join<Order, LineItem>(std::vector<Order>& orders, std::vector<LineItem>& items);
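Both versions of sort_merge_join above derive partition sizes and start offsets with the same two Thrust steps: reduce_by_key over a constant_iterator(1) turns a sorted key column into one (key, count) pair per distinct key, and exclusive_scan turns counts into start indexes. The standalone sketch below (made-up keys, not the Order/LineItem types used above) shows just that counting step.

#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <cstdio>

int main()
{
    int h_keys[] = {3, 1, 2, 1, 3, 3};                 // hypothetical join keys
    thrust::device_vector<int> keys(h_keys, h_keys + 6);
    thrust::sort(keys.begin(), keys.end());            // 1 1 2 3 3 3

    thrust::device_vector<int> unique_keys(keys.size());
    thrust::device_vector<int> counts(keys.size());
    // Each key contributes 1 per occurrence, so the reduction yields occurrence counts.
    thrust::pair<thrust::device_vector<int>::iterator,
                 thrust::device_vector<int>::iterator> ends =
        thrust::reduce_by_key(keys.begin(), keys.end(),
                              thrust::make_constant_iterator(1),
                              unique_keys.begin(), counts.begin());
    int n = ends.first - unique_keys.begin();           // 3 distinct keys

    // Prefix sum over the counts gives the start offset of each key's run.
    thrust::device_vector<int> starts(n);
    thrust::exclusive_scan(counts.begin(), counts.begin() + n, starts.begin());

    for (int i = 0; i < n; ++i)
        printf("key %d: count %d, start %d\n",
               (int)unique_keys[i], (int)counts[i], (int)starts[i]);
    // Expected: key 1: count 2, start 0 / key 2: count 1, start 2 / key 3: count 3, start 3
    return 0;
}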
180e755fc80d6910beb799db23a77d323bf82d8f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> //#define __DEBUG #define element_addr(a, m, n, d) (a + ((m) * (d) + n)) #define element(a, m, n, d) (((m >= 0)&&(m < d)&&(n >= 0)&&(n < d))? (a[(m) * (d) + n]) : 0) #define CUDA_CALL(cmd) do { \ if((err = cmd) != hipSuccess) { \ printf("(%d) Cuda Error:(%d) %s\n", __LINE__,int(err), hipGetErrorString(err) ); \ } \ } while(0) #define BLK_SZ 256 #define BLK_SIDE 16 /*__global__ void computeKernel(int *living, float *honeys[2], float *honeyr, int d, float rbee, float rflow) { //honeyr[threadIdx.x] = honeys[0][threadIdx.x]; //honeyr[threadIdx.x] = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; //honeyr[i*d + j] = i+j; *(element_addr(honeyr, i, j, d)) = element(honeyr,i-1,j-1,d); }*/ __global__ void computeKernelReal(int *living, float *honeyin,float *honeyout, int d, float rbee, float rflow) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; *(element_addr(honeyout, i, j, d)) = rflow * (element(honeyin, i-1, j-1, d) + element(honeyin, i-1, j, d) + element(honeyin, i-1, j+1, d) + element(honeyin, i, j-1, d) + element(honeyin, i, j+1, d) + element(honeyin, i+1, j-1, d) + element(honeyin, i+1, j, d) + element(honeyin, i+1, j+1, d) ) + (1.0 - 8.0 * rflow) * element(honeyin, i, j, d) + rbee * element(living, i, j, d); } int calculateGPU(const int *living, float *honey[2], int d, int n, float rbee, float rflow) { hipError_t err; clock_t start, end; hipEvent_t kstart, kstop; float ktime; double time; int i; /* PA2: Define your local variables here */ int *living_d; float *honeyin_d; float *honey_r; /* Set up device timers */ CUDA_CALL(hipSetDevice(0)); CUDA_CALL(hipEventCreate(&kstart)); CUDA_CALL(hipEventCreate(&kstop)); /* Start GPU end-to-end timer */ start = clock(); /* PA2: Add CUDA kernel call preparation code here */ CUDA_CALL(hipMalloc((void **)&living_d, d * d * sizeof(int))); CUDA_CALL(hipMalloc((void **)&honeyin_d, d * d * sizeof(float))); CUDA_CALL(hipMalloc((void **)&honey_r, d * d * sizeof(float))); CUDA_CALL(hipMemcpy(living_d, living, d * d * sizeof(int), hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(honeyin_d, honey[0], d * d * sizeof(float), hipMemcpyHostToDevice)); /* Start GPU computation timer */ CUDA_CALL(hipEventRecord(kstart, 0)); /* PA2: Add main honey level simulation loop here */ dim3 dimGrid(d/BLK_SIDE,d/BLK_SIDE); dim3 dimBlock(BLK_SIDE,BLK_SIDE); for (i=0;i< n;i++) { hipLaunchKernelGGL(( computeKernelReal), dim3(dimGrid),dim3(dimBlock), 0, 0, living_d,honeyin_d,honey_r,d,rbee,rflow); //CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(honeyin_d,honey_r,d * d * sizeof(float),hipMemcpyDeviceToDevice )); } //computeKernel<<<dimGrid,dimBlock>>>(living_d,honey_d,honey_r,d,rbee,rflow); /* Stop GPU computation timer */ CUDA_CALL(hipEventRecord(kstop, 0)); CUDA_CALL(hipEventSynchronize(kstop)); CUDA_CALL(hipEventElapsedTime(&ktime, kstart, kstop)); printf("GPU computation: %f msec\n", ktime); /* PA2: Add post CUDA kernel call processing and cleanup here */ //CUDA_CALL(hipMemcpy(honey[0],honey_d[resin],d * d * sizeof(float),hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(honey[1],honey_r,d * d * sizeof(float),hipMemcpyDeviceToHost)); /*printf("\nhoney[] after cuda kernel call -\n"); for(int i = 0; i < d; i++ ) { for(int j = 0; j < d; j++ ) { printf("%f ", element(honey[1], i, j, d)); } printf("\n"); }*/ CUDA_CALL(hipFree(living_d)); 
CUDA_CALL(hipFree(honeyin_d)); CUDA_CALL(hipFree(honey_r)); /* Stop GPU end-to-end timer and timer cleanup */ end = clock(); CUDA_CALL(hipEventDestroy(kstart)); CUDA_CALL(hipEventDestroy(kstop)); time = ((double)(end-start))/CLOCKS_PER_SEC; printf("GPU end-to-end: %lf sec\n", time); return 1; }
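The simulation above launches dimGrid(d/BLK_SIDE, d/BLK_SIDE) blocks, which implicitly assumes d is a multiple of BLK_SIDE; integer division would otherwise drop the trailing rows and columns. A common alternative, sketched below with made-up names (touch_kernel is only a placeholder, not the stencil above), is to round the grid up and guard writes inside the kernel.

#include <cuda_runtime.h>
#include <cstdio>

#define SIDE 16

__global__ void touch_kernel(float* cells, int d)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < d && j < d)              // guard for the partial edge blocks
        cells[i * d + j] = 1.0f;
}

int main()
{
    const int d = 50;                // deliberately not a multiple of SIDE
    float* d_cells = nullptr;
    cudaMalloc(&d_cells, d * d * sizeof(float));

    dim3 block(SIDE, SIDE);
    dim3 blocks((d + SIDE - 1) / SIDE, (d + SIDE - 1) / SIDE);   // ceil division
    touch_kernel<<<blocks, block>>>(d_cells, d);
    cudaDeviceSynchronize();

    printf("launched %u x %u blocks for d = %d\n", blocks.x, blocks.y, d);
    cudaFree(d_cells);
    return 0;
}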
180e755fc80d6910beb799db23a77d323bf82d8f.cu
#include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #include <time.h> //#define __DEBUG #define element_addr(a, m, n, d) (a + ((m) * (d) + n)) #define element(a, m, n, d) (((m >= 0)&&(m < d)&&(n >= 0)&&(n < d))? (a[(m) * (d) + n]) : 0) #define CUDA_CALL(cmd) do { \ if((err = cmd) != cudaSuccess) { \ printf("(%d) Cuda Error:(%d) %s\n", __LINE__,int(err), cudaGetErrorString(err) ); \ } \ } while(0) #define BLK_SZ 256 #define BLK_SIDE 16 /*__global__ void computeKernel(int *living, float *honeys[2], float *honeyr, int d, float rbee, float rflow) { //honeyr[threadIdx.x] = honeys[0][threadIdx.x]; //honeyr[threadIdx.x] = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; //honeyr[i*d + j] = i+j; *(element_addr(honeyr, i, j, d)) = element(honeyr,i-1,j-1,d); }*/ __global__ void computeKernelReal(int *living, float *honeyin,float *honeyout, int d, float rbee, float rflow) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; *(element_addr(honeyout, i, j, d)) = rflow * (element(honeyin, i-1, j-1, d) + element(honeyin, i-1, j, d) + element(honeyin, i-1, j+1, d) + element(honeyin, i, j-1, d) + element(honeyin, i, j+1, d) + element(honeyin, i+1, j-1, d) + element(honeyin, i+1, j, d) + element(honeyin, i+1, j+1, d) ) + (1.0 - 8.0 * rflow) * element(honeyin, i, j, d) + rbee * element(living, i, j, d); } int calculateGPU(const int *living, float *honey[2], int d, int n, float rbee, float rflow) { cudaError_t err; clock_t start, end; cudaEvent_t kstart, kstop; float ktime; double time; int i; /* PA2: Define your local variables here */ int *living_d; float *honeyin_d; float *honey_r; /* Set up device timers */ CUDA_CALL(cudaSetDevice(0)); CUDA_CALL(cudaEventCreate(&kstart)); CUDA_CALL(cudaEventCreate(&kstop)); /* Start GPU end-to-end timer */ start = clock(); /* PA2: Add CUDA kernel call preparation code here */ CUDA_CALL(cudaMalloc((void **)&living_d, d * d * sizeof(int))); CUDA_CALL(cudaMalloc((void **)&honeyin_d, d * d * sizeof(float))); CUDA_CALL(cudaMalloc((void **)&honey_r, d * d * sizeof(float))); CUDA_CALL(cudaMemcpy(living_d, living, d * d * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(honeyin_d, honey[0], d * d * sizeof(float), cudaMemcpyHostToDevice)); /* Start GPU computation timer */ CUDA_CALL(cudaEventRecord(kstart, 0)); /* PA2: Add main honey level simulation loop here */ dim3 dimGrid(d/BLK_SIDE,d/BLK_SIDE); dim3 dimBlock(BLK_SIDE,BLK_SIDE); for (i=0;i< n;i++) { computeKernelReal<<<dimGrid,dimBlock>>>(living_d,honeyin_d,honey_r,d,rbee,rflow); //CUDA_CALL(cudaThreadSynchronize()); CUDA_CALL(cudaMemcpy(honeyin_d,honey_r,d * d * sizeof(float),cudaMemcpyDeviceToDevice )); } //computeKernel<<<dimGrid,dimBlock>>>(living_d,honey_d,honey_r,d,rbee,rflow); /* Stop GPU computation timer */ CUDA_CALL(cudaEventRecord(kstop, 0)); CUDA_CALL(cudaEventSynchronize(kstop)); CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop)); printf("GPU computation: %f msec\n", ktime); /* PA2: Add post CUDA kernel call processing and cleanup here */ //CUDA_CALL(cudaMemcpy(honey[0],honey_d[resin],d * d * sizeof(float),cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(honey[1],honey_r,d * d * sizeof(float),cudaMemcpyDeviceToHost)); /*printf("\nhoney[] after cuda kernel call -\n"); for(int i = 0; i < d; i++ ) { for(int j = 0; j < d; j++ ) { printf("%f ", element(honey[1], i, j, d)); } printf("\n"); }*/ CUDA_CALL(cudaFree(living_d)); CUDA_CALL(cudaFree(honeyin_d)); CUDA_CALL(cudaFree(honey_r)); /* Stop 
GPU end-to-end timer and timer cleanup */ end = clock(); CUDA_CALL(cudaEventDestroy(kstart)); CUDA_CALL(cudaEventDestroy(kstop)); time = ((double)(end-start))/CLOCKS_PER_SEC; printf("GPU end-to-end: %lf sec\n", time); return 1; }
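The iteration loop in calculateGPU above feeds each step's output back in with a cudaMemcpyDeviceToDevice. A frequently used alternative for this in/out kernel shape is to keep two buffers and swap the pointers between iterations, avoiding the per-step copy. The sketch below is illustrative only (step_kernel is a trivial stand-in, not the stencil above) and shows the pointer-swap pattern under that assumption.

#include <cuda_runtime.h>
#include <utility>
#include <cstdio>

__global__ void step_kernel(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = 0.5f * in[i];       // stand-in for the real per-cell update
}

int main()
{
    const int n = 1 << 20;
    const int iterations = 10;
    float *d_a = nullptr, *d_b = nullptr;
    cudaMalloc(&d_a, n * sizeof(float));
    cudaMalloc(&d_b, n * sizeof(float));
    cudaMemset(d_a, 0, n * sizeof(float));

    dim3 block(256);
    dim3 blocks((n + block.x - 1) / block.x);

    float* in = d_a;
    float* out = d_b;
    for (int i = 0; i < iterations; ++i) {
        step_kernel<<<blocks, block>>>(in, out, n);
        std::swap(in, out);          // next iteration reads what was just written
    }
    cudaDeviceSynchronize();

    // 'in' now points at the buffer holding the most recent result.
    printf("done after %d iterations\n", iterations);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}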
8063b0075d53537c6d701cae229d1b72159b680e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 - 2021 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Adapted from https://github.com/abadams/permutohedral which has the following license... MIT License Copyright (c) 2020 Andrew Adams Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define BLOCK_SIZE 32 #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <torch/extension.h> #include <THH/THHAtomics.cuh> #include "hash_table.cuh" #include "permutohedral.h" #include "utils/meta_macros.h" template <typename scalar_t> struct MatrixEntry { int index; scalar_t weight; }; template <typename scalar_t, int pd> __global__ static void createMatrix( const int elementCount, const scalar_t* positions, const scalar_t* values, const scalar_t* scaleFactor, MatrixEntry<scalar_t>* matrix) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; scalar_t myElevated[pd + 1]; const scalar_t* myPosition = positions + idx * pd; int myGreedy[pd + 1]; int myRank[pd + 1]; scalar_t myBarycentric[pd + 2]; __shared__ short keys[pd * BLOCK_SIZE]; short* myKey = keys + threadId * pd; if (!outOfBounds) { myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1]; for (int i = pd - 1; i > 0; i--) { myElevated[i] = myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i]; } myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0]; // find the closest zero-colored lattice point // greedily search for the closest zero-colored lattice point signed short sum = 0; for (int i = 0; i <= pd; i++) { scalar_t v = myElevated[i] * (1.0f / (pd + 1)); scalar_t up = ceilf(v) * (pd + 1); scalar_t down = floorf(v) * (pd + 1); myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? 
up : down); sum += myGreedy[i]; } sum /= pd + 1; // sort differential to find the permutation between this simplex and the canonical one for (int i = 0; i <= pd; i++) { myRank[i] = 0; for (int j = 0; j <= pd; j++) { scalar_t iDiff = myElevated[i] - myGreedy[i]; scalar_t jDiff = myElevated[j] - myGreedy[j]; if (iDiff < jDiff || (iDiff == jDiff && i > j)) { myRank[i]++; } } } if (sum > 0) // sum too large, need to bring down the ones with the smallest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] >= pd + 1 - sum) { myGreedy[i] -= (pd + 1); myRank[i] += sum - (pd + 1); } else { myRank[i] += sum; } } } else if (sum < 0) // sum too small, need to bring up the ones with largest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] < -sum) { myGreedy[i] += (pd + 1); myRank[i] += sum + (pd + 1); } else { myRank[i] += sum; } } } #ifdef LINEAR_D_MEMORY for (int i = 0; i <= pd; i++) { table_zeros[idx * (pd + 1) + i] = myGreedy[i]; table_rank[idx * (pd + 1) + i] = myRank[i]; } #endif // turn delta into barycentric coords for (int i = 0; i <= pd + 1; i++) { myBarycentric[i] = 0; } for (int i = 0; i <= pd; i++) { scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1)); myBarycentric[pd - myRank[i]] += delta; myBarycentric[pd + 1 - myRank[i]] -= delta; } myBarycentric[0] += 1.0f + myBarycentric[pd + 1]; } #ifdef USE_ADDITIVE_HASH unsigned int cumulative_hash = hash<pd>(myGreedy); #endif for (int color = 0; color <= pd; color++) { // Compute the location of the lattice point explicitly (all but // the last coordinate - it's redundant because they sum to zero) if (!outOfBounds) { for (int i = 0; i < pd; i++) { myKey[i] = myGreedy[i] + color; if (myRank[i] > pd - color) { myKey[i] -= (pd + 1); } } } #ifdef USE_ADDITIVE_HASH for (int i = 0; i < pd; i++) { if (myRank[i] == pd - color) { cumulative_hash += hOffset[i]; } } #endif if (!outOfBounds) { MatrixEntry<scalar_t> r; #ifdef USE_ADDITIVE_HASH r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color); #else r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color); #endif r.weight = myBarycentric[color]; matrix[idx * (pd + 1) + color] = r; } } } template <typename scalar_t, int kd> __global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= elementCount) return; // find my hash table entry int* e = table_entries + idx; // Check if I created my own key in the previous phase if (*e >= 0) { // Rehash my key and reset the pointer in order to merge with // any other pixel that created a different entry under the // same key. If the computation was serial this would never // happen, but sometimes race conditions can make the same key // be inserted twice. hashTableRetrieve always returns the // earlier, so it's no problem as long as we rehash now. 
#ifdef LINEAR_D_MEMORY // Get my key short myKey[kd]; generateKey<kd>(*e, myKey); *e = hashTableRetrieve<kd>(myKey); #else *e = hashTableRetrieve<kd>(table_keys + *e * kd); #endif } } template <typename scalar_t, int pd, int vd> __global__ static void splat( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const bool outOfBounds = idx >= elementCount; if (outOfBounds) { return; } scalar_t* myValue = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { gpuAtomicAdd(val + j, myValue[j] * r.weight); } gpuAtomicAdd(val + vd, r.weight); } // splat splits by color, so extend the y coordinate to our blocks to represent that // dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1); // dim3 oldblockSize(8, 8, 1); // oldblocks.y *= pd+1; // splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix); // int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; // int blockSize = BLOCK_SIZE; // splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix); template <typename scalar_t, int pd, int vd> __global__ static void splatCache( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { // const int x = threadIdx.x + blockIdx.x * blockDim.x; // const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y; // const int threadId = threadIdx.y*blockDim.x + threadIdx.x; // const int color = blockIdx.y % (pd+1); // const int idx = y*w + x; const int threadId = threadIdx.x; const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; __shared__ int sharedOffsets[BLOCK_SIZE]; __shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)]; int myOffset = -1; scalar_t* myValue = sharedValues + threadId * (vd + 1); if (!outOfBounds) { scalar_t* value = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; // record the offset into the keys/values array in shared space myOffset = sharedOffsets[threadId] = r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] = value[j] * r.weight; } myValue[vd] = r.weight; } else { sharedOffsets[threadId] = -1; } __syncthreads(); // am I the first thread in this block to care about this key? 
if (outOfBounds) return; for (int i = 0; i < BLOCK_SIZE; i++) { if (i < threadId) { if (myOffset == sharedOffsets[i]) { // somebody else with higher priority cares about this key return; } } else if (i > threadId) { if (myOffset == sharedOffsets[i]) { // someone else with lower priority cares about this key, accumulate it into mine for (int j = 0; j <= vd; j++) { sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j]; } } } } // only the threads with something to write to main memory are still going scalar_t* val = table_values + myOffset; for (int j = 0; j <= vd; j++) { gpuAtomicAdd(val + j, myValue[j]); } } template <typename scalar_t, int pd, int vd> __global__ static void blur( int n, scalar_t* newValues, MatrixEntry<scalar_t>* matrix, int color, scalar_t* table_values) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // Check if I'm valid if (matrix[idx].index != idx) return; // find my key and the keys of my neighbours short myKey[pd + 1]; short np[pd + 1]; short nm[pd + 1]; #ifdef LINEAR_D_MEMORY generateKey<pd>(idx, myKey); for (int i = 0; i < pd; i++) { np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #else for (int i = 0; i < pd; i++) { myKey[i] = table_keys[idx * pd + i]; np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #endif np[color] -= pd + 1; nm[color] += pd + 1; #ifdef USE_ADDITIVE_HASH unsigned int hCurrent = hash<pd>(myKey); int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np); int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm); #else int offNp = hashTableRetrieve<pd>(np); int offNm = hashTableRetrieve<pd>(nm); #endif scalar_t* valMe = table_values + (vd + 1) * idx; scalar_t* valNp = table_values + (vd + 1) * offNp; scalar_t* valNm = table_values + (vd + 1) * offNm; scalar_t* valOut = newValues + (vd + 1) * idx; if (offNp >= 0 && offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4; } } else if (offNp >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4; } } else if (offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4; } } else { for (int i = 0; i <= vd; i++) { valOut[i] = valMe[i] * 2; } } } template <typename scalar_t, int pd, int vd> __global__ static void slice( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; if (outOfBounds) return; __shared__ scalar_t localValue[BLOCK_SIZE * vd]; scalar_t* myValue = localValue + threadId * vd; scalar_t myWeight = 0; for (int i = 0; i < vd; i++) { myValue[i] = 0; } for (int i = 0; i <= pd; i++) { MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + i]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] += r.weight * val[j]; } myWeight += r.weight * val[vd]; } myWeight = 1.0f / myWeight; for (int j = 0; j < vd; j++) { values[idx * vd + j] = myValue[j] * myWeight; } } template <typename scalar_t, int vd, int pd> void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) { scalar_t blurVariance = accurate ? 
0.5 : 0; scalar_t* scaleFactor; hipMalloc(&scaleFactor, pd * sizeof(scalar_t)); scalar_t scaleFactorHost[pd]; for (int i = 0; i < pd; i++) { scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2))); } hipMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), hipMemcpyHostToDevice); MatrixEntry<scalar_t>* matrix; hipMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>)); scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1)); // Populate constant memory for hash helpers unsigned long long int __host_two32 = ((unsigned long long int)1) << 32; unsigned int __host_div_c = 2 * (elementCount * (pd + 1)); unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f)); unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1; hipMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int)); hipMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int)); hipMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int)); // Populate constant memory with hash of offset vectors unsigned int hOffset_host[pd + 1]; signed short offset[pd + 1]; for (int i = 0; i < pd; offset[i] = 1, i++) ; for (int i = 0; i <= pd; i++) { offset[i] -= pd + 1; hOffset_host[i] = hash<pd>(offset); offset[i] += pd + 1; } hipMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1)); int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; int blockSize = BLOCK_SIZE; hipLaunchKernelGGL(( createMatrix<scalar_t, pd>), dim3(blockCount), dim3(blockSize), 0, 0, elementCount, positions, values, scaleFactor, matrix); // fix duplicate hash table entries int tableSize = elementCount * 2 * (pd + 1); int cleanBlockSize = 32; int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1; hipLaunchKernelGGL(( cleanHashTable<scalar_t, pd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, tableSize, matrix); hipLaunchKernelGGL(( splat<scalar_t, pd, vd>), dim3(dim3(blockCount, 1)), dim3(dim3(blockSize, pd + 1)), 0, 0, elementCount, values, matrix, table_values); if (accurate) { scalar_t* newValues; hipMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); hipMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); for (int color = 0; color <= pd; color++) { hipLaunchKernelGGL(( blur<scalar_t, pd, vd>) , dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, elementCount * (pd + 1), newValues, matrix, color, table_values); scalar_t* swap = newValues; newValues = table_values; table_values = swap; } hipFree(newValues); } hipLaunchKernelGGL(( slice<scalar_t, pd, vd>), dim3(blockCount), dim3(blockSize), 0, 0, elementCount, values, matrix, table_values); destroyHashTable<scalar_t>(); hipFree(table_values); hipFree(scaleFactor); hipFree(matrix); } #define DECLARATION(dc, fc) \ template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \ template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate); DO_FOR_AB(DECLARATION, 16, 19)
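For orientation, here is a rough host-side sketch of how the PermutohedralCuda entry point above might be called. The dimensions pd = 5 and vd = 3 (for example, bilateral filtering of RGB values over an x, y, r, g, b feature space) are illustrative assumptions; the combinations actually instantiated are whatever DO_FOR_AB(DECLARATION, 16, 19) expands to, and positions are typically pre-divided by the desired filter standard deviations before the call, which is not shown in this file. The sketch uses the CUDA runtime API to match the .cu half of the pair; a HIP build would use hipMalloc/hipMemcpy/hipFree instead.

// Sketch only: assumes <float, vd = 3, pd = 5> is among the instantiated templates.
constexpr int pd = 5;  // dimensionality of the position/feature vectors
constexpr int vd = 3;  // dimensionality of the values being filtered

void run_permutohedral_filter(const float* h_positions, float* h_values, int elementCount) {
  float *d_positions, *d_values;
  cudaMalloc(&d_positions, elementCount * pd * sizeof(float));
  cudaMalloc(&d_values, elementCount * vd * sizeof(float));
  cudaMemcpy(d_positions, h_positions, elementCount * pd * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_values, h_values, elementCount * vd * sizeof(float), cudaMemcpyHostToDevice);

  // Filtering happens in place on d_values; 'true' enables the blur pass.
  // Note the template parameter order in the definition is <scalar_t, vd, pd>.
  PermutohedralCuda<float, vd, pd>(d_values, d_positions, elementCount, true);

  cudaMemcpy(h_values, d_values, elementCount * vd * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_positions);
  cudaFree(d_values);
}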
8063b0075d53537c6d701cae229d1b72159b680e.cu
/* Copyright 2020 - 2021 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Adapted from https://github.com/abadams/permutohedral which has the following license... MIT License Copyright (c) 2020 Andrew Adams Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define BLOCK_SIZE 32 #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <torch/extension.h> #include <THC/THCAtomics.cuh> #include "hash_table.cuh" #include "permutohedral.h" #include "utils/meta_macros.h" template <typename scalar_t> struct MatrixEntry { int index; scalar_t weight; }; template <typename scalar_t, int pd> __global__ static void createMatrix( const int elementCount, const scalar_t* positions, const scalar_t* values, const scalar_t* scaleFactor, MatrixEntry<scalar_t>* matrix) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; scalar_t myElevated[pd + 1]; const scalar_t* myPosition = positions + idx * pd; int myGreedy[pd + 1]; int myRank[pd + 1]; scalar_t myBarycentric[pd + 2]; __shared__ short keys[pd * BLOCK_SIZE]; short* myKey = keys + threadId * pd; if (!outOfBounds) { myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1]; for (int i = pd - 1; i > 0; i--) { myElevated[i] = myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i]; } myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0]; // find the closest zero-colored lattice point // greedily search for the closest zero-colored lattice point signed short sum = 0; for (int i = 0; i <= pd; i++) { scalar_t v = myElevated[i] * (1.0f / (pd + 1)); scalar_t up = ceilf(v) * (pd + 1); scalar_t down = floorf(v) * (pd + 1); myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? 
up : down); sum += myGreedy[i]; } sum /= pd + 1; // sort differential to find the permutation between this simplex and the canonical one for (int i = 0; i <= pd; i++) { myRank[i] = 0; for (int j = 0; j <= pd; j++) { scalar_t iDiff = myElevated[i] - myGreedy[i]; scalar_t jDiff = myElevated[j] - myGreedy[j]; if (iDiff < jDiff || (iDiff == jDiff && i > j)) { myRank[i]++; } } } if (sum > 0) // sum too large, need to bring down the ones with the smallest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] >= pd + 1 - sum) { myGreedy[i] -= (pd + 1); myRank[i] += sum - (pd + 1); } else { myRank[i] += sum; } } } else if (sum < 0) // sum too small, need to bring up the ones with largest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] < -sum) { myGreedy[i] += (pd + 1); myRank[i] += sum + (pd + 1); } else { myRank[i] += sum; } } } #ifdef LINEAR_D_MEMORY for (int i = 0; i <= pd; i++) { table_zeros[idx * (pd + 1) + i] = myGreedy[i]; table_rank[idx * (pd + 1) + i] = myRank[i]; } #endif // turn delta into barycentric coords for (int i = 0; i <= pd + 1; i++) { myBarycentric[i] = 0; } for (int i = 0; i <= pd; i++) { scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1)); myBarycentric[pd - myRank[i]] += delta; myBarycentric[pd + 1 - myRank[i]] -= delta; } myBarycentric[0] += 1.0f + myBarycentric[pd + 1]; } #ifdef USE_ADDITIVE_HASH unsigned int cumulative_hash = hash<pd>(myGreedy); #endif for (int color = 0; color <= pd; color++) { // Compute the location of the lattice point explicitly (all but // the last coordinate - it's redundant because they sum to zero) if (!outOfBounds) { for (int i = 0; i < pd; i++) { myKey[i] = myGreedy[i] + color; if (myRank[i] > pd - color) { myKey[i] -= (pd + 1); } } } #ifdef USE_ADDITIVE_HASH for (int i = 0; i < pd; i++) { if (myRank[i] == pd - color) { cumulative_hash += hOffset[i]; } } #endif if (!outOfBounds) { MatrixEntry<scalar_t> r; #ifdef USE_ADDITIVE_HASH r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color); #else r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color); #endif r.weight = myBarycentric[color]; matrix[idx * (pd + 1) + color] = r; } } } template <typename scalar_t, int kd> __global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= elementCount) return; // find my hash table entry int* e = table_entries + idx; // Check if I created my own key in the previous phase if (*e >= 0) { // Rehash my key and reset the pointer in order to merge with // any other pixel that created a different entry under the // same key. If the computation was serial this would never // happen, but sometimes race conditions can make the same key // be inserted twice. hashTableRetrieve always returns the // earlier, so it's no problem as long as we rehash now. 
#ifdef LINEAR_D_MEMORY // Get my key short myKey[kd]; generateKey<kd>(*e, myKey); *e = hashTableRetrieve<kd>(myKey); #else *e = hashTableRetrieve<kd>(table_keys + *e * kd); #endif } } template <typename scalar_t, int pd, int vd> __global__ static void splat( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const bool outOfBounds = idx >= elementCount; if (outOfBounds) { return; } scalar_t* myValue = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { gpuAtomicAdd(val + j, myValue[j] * r.weight); } gpuAtomicAdd(val + vd, r.weight); } // splat splits by color, so extend the y coordinate to our blocks to represent that // dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1); // dim3 oldblockSize(8, 8, 1); // oldblocks.y *= pd+1; // splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix); // int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; // int blockSize = BLOCK_SIZE; // splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix); template <typename scalar_t, int pd, int vd> __global__ static void splatCache( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { // const int x = threadIdx.x + blockIdx.x * blockDim.x; // const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y; // const int threadId = threadIdx.y*blockDim.x + threadIdx.x; // const int color = blockIdx.y % (pd+1); // const int idx = y*w + x; const int threadId = threadIdx.x; const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; __shared__ int sharedOffsets[BLOCK_SIZE]; __shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)]; int myOffset = -1; scalar_t* myValue = sharedValues + threadId * (vd + 1); if (!outOfBounds) { scalar_t* value = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; // record the offset into the keys/values array in shared space myOffset = sharedOffsets[threadId] = r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] = value[j] * r.weight; } myValue[vd] = r.weight; } else { sharedOffsets[threadId] = -1; } __syncthreads(); // am I the first thread in this block to care about this key? 
if (outOfBounds) return; for (int i = 0; i < BLOCK_SIZE; i++) { if (i < threadId) { if (myOffset == sharedOffsets[i]) { // somebody else with higher priority cares about this key return; } } else if (i > threadId) { if (myOffset == sharedOffsets[i]) { // someone else with lower priority cares about this key, accumulate it into mine for (int j = 0; j <= vd; j++) { sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j]; } } } } // only the threads with something to write to main memory are still going scalar_t* val = table_values + myOffset; for (int j = 0; j <= vd; j++) { gpuAtomicAdd(val + j, myValue[j]); } } template <typename scalar_t, int pd, int vd> __global__ static void blur( int n, scalar_t* newValues, MatrixEntry<scalar_t>* matrix, int color, scalar_t* table_values) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // Check if I'm valid if (matrix[idx].index != idx) return; // find my key and the keys of my neighbours short myKey[pd + 1]; short np[pd + 1]; short nm[pd + 1]; #ifdef LINEAR_D_MEMORY generateKey<pd>(idx, myKey); for (int i = 0; i < pd; i++) { np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #else for (int i = 0; i < pd; i++) { myKey[i] = table_keys[idx * pd + i]; np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #endif np[color] -= pd + 1; nm[color] += pd + 1; #ifdef USE_ADDITIVE_HASH unsigned int hCurrent = hash<pd>(myKey); int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np); int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm); #else int offNp = hashTableRetrieve<pd>(np); int offNm = hashTableRetrieve<pd>(nm); #endif scalar_t* valMe = table_values + (vd + 1) * idx; scalar_t* valNp = table_values + (vd + 1) * offNp; scalar_t* valNm = table_values + (vd + 1) * offNm; scalar_t* valOut = newValues + (vd + 1) * idx; if (offNp >= 0 && offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4; } } else if (offNp >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4; } } else if (offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4; } } else { for (int i = 0; i <= vd; i++) { valOut[i] = valMe[i] * 2; } } } template <typename scalar_t, int pd, int vd> __global__ static void slice( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; if (outOfBounds) return; __shared__ scalar_t localValue[BLOCK_SIZE * vd]; scalar_t* myValue = localValue + threadId * vd; scalar_t myWeight = 0; for (int i = 0; i < vd; i++) { myValue[i] = 0; } for (int i = 0; i <= pd; i++) { MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + i]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] += r.weight * val[j]; } myWeight += r.weight * val[vd]; } myWeight = 1.0f / myWeight; for (int j = 0; j < vd; j++) { values[idx * vd + j] = myValue[j] * myWeight; } } template <typename scalar_t, int vd, int pd> void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) { scalar_t blurVariance = accurate ? 
0.5 : 0; scalar_t* scaleFactor; cudaMalloc(&scaleFactor, pd * sizeof(scalar_t)); scalar_t scaleFactorHost[pd]; for (int i = 0; i < pd; i++) { scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2))); } cudaMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), cudaMemcpyHostToDevice); MatrixEntry<scalar_t>* matrix; cudaMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>)); scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1)); // Populate constant memory for hash helpers unsigned long long int __host_two32 = ((unsigned long long int)1) << 32; unsigned int __host_div_c = 2 * (elementCount * (pd + 1)); unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f)); unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1; cudaMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int)); cudaMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int)); cudaMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int)); // Populate constant memory with hash of offset vectors unsigned int hOffset_host[pd + 1]; signed short offset[pd + 1]; for (int i = 0; i < pd; offset[i] = 1, i++) ; for (int i = 0; i <= pd; i++) { offset[i] -= pd + 1; hOffset_host[i] = hash<pd>(offset); offset[i] += pd + 1; } cudaMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1)); int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; int blockSize = BLOCK_SIZE; createMatrix<scalar_t, pd><<<blockCount, blockSize>>>(elementCount, positions, values, scaleFactor, matrix); // fix duplicate hash table entries int tableSize = elementCount * 2 * (pd + 1); int cleanBlockSize = 32; int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1; cleanHashTable<scalar_t, pd><<<cleanBlocks, cleanBlockSize>>>(tableSize, matrix); splat<scalar_t, pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd + 1)>>>(elementCount, values, matrix, table_values); if (accurate) { scalar_t* newValues; cudaMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); cudaMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); for (int color = 0; color <= pd; color++) { blur<scalar_t, pd, vd> <<<cleanBlocks, cleanBlockSize>>>(elementCount * (pd + 1), newValues, matrix, color, table_values); scalar_t* swap = newValues; newValues = table_values; table_values = swap; } cudaFree(newValues); } slice<scalar_t, pd, vd><<<blockCount, blockSize>>>(elementCount, values, matrix, table_values); destroyHashTable<scalar_t>(); cudaFree(table_values); cudaFree(scaleFactor); cudaFree(matrix); } #define DECLARATION(dc, fc) \ template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \ template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate); DO_FOR_AB(DECLARATION, 16, 19)
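The .cu file above mirrors the .hip version line for line; one small piece worth isolating is the per-dimension scale factor computed on the host before positions are embedded into the lattice. A standalone sketch of just that arithmetic; pd is arbitrary here and the interpretation in the comments is a reading of the formula, not a statement from the file:

#include <cmath>
#include <cstdio>

int main() {
  const int pd = 5;                 // illustrative position dimension
  const float blurVariance = 0.5f;  // the value used when 'accurate' is true

  // Same expression as scaleFactorHost[i] in PermutohedralCuda. The scale
  // shrinks as i grows, and the extra blurVariance added when 'accurate' is
  // true appears to widen the embedding to compensate for the blur pass.
  for (int i = 0; i < pd; i++) {
    float s = (pd + 1) * sqrtf((1.0f / 6 + blurVariance) / ((i + 1) * (i + 2)));
    printf("scaleFactor[%d] = %f\n", i, s);
  }
  return 0;
}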
df8d373c0a64149dd809ce781df8660185fd64e4.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/conv_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" namespace phi { template <typename T, typename Context> void ConvCudnnKernel(const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* output) { ctx.template Alloc<T>(output); std::vector<int> paddings = paddings_t; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); auto dtype = paddle::platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP // HIP MIOPEN ONLY SUPPORT NCHW format auto compute_format = paddle::platform::DataLayout::kNCHW; #else // Tensor Core introduced from Volta GPUs supports more faster conv op // with FP16 in NHWC data format. const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(ctx); // We will only do data format conversion from NHWC to NCHW. // cudnn will convert NCHW to NHWC automatically on Tensor Core. auto compute_format = compute_in_nhwc && channel_last ? paddle::platform::DataLayout::kNHWC : paddle::platform::DataLayout::kNCHW; #endif VLOG(3) << "Compute ConvOp with cuDNN:" << " data_format=" << data_format << " compute_format=" << (compute_format == paddle::platform::DataLayout::kNHWC ? 
"NHWC" : "NCHW"); // ------------ transformed tensor ----------- DenseTensor transformed_input_channel(input.type()); DenseTensor transformed_output(output->type()); DenseTensor transformed_filter_channel(filter.type()); T* output_data = nullptr; if (channel_last && compute_format == paddle::platform::DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<Context, T>(ctx, &input, &transformed_input_channel); TransToChannelFirst<Context, T>(ctx, &input, &transformed_input_channel); ResizeToChannelFirst<Context, T>(ctx, output, &transformed_output); } else { transformed_input_channel.ShareDataWith(input); transformed_output.ShareDataWith(*output); } if (compute_format == paddle::platform::DataLayout::kNHWC) { VLOG(3) << "Transform filter tensor from NCHW to NHWC."; ResizeToChannelLast<Context, T>(ctx, &filter, &transformed_filter_channel); TransToChannelLast<Context, T>(ctx, &filter, &transformed_filter_channel); } else { transformed_filter_channel.ShareDataWith(filter); } output_data = transformed_output.data<T>(); // update padding and dilation auto in_dims = transformed_input_channel.dims(); auto filter_dims = transformed_filter_channel.dims(); DDim in_data_dims; DDim filter_data_dims; if (compute_format == paddle::platform::DataLayout::kNCHW) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); } else { in_data_dims = slice_ddim(in_dims, 1, in_dims.size() - 1); filter_data_dims = slice_ddim(filter_dims, 1, filter_dims.size() - 1); } std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; if (compute_format == paddle::platform::DataLayout::kNCHW) { new_input_shape_vec[1] = transformed_input_channel.dims()[1]; } else { new_input_shape_vec[data_dim + 1] = transformed_input_channel.dims()[data_dim + 1]; } std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); if (compute_format == paddle::platform::DataLayout::kNCHW) { new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; } else { new_input_shape_vec[i + 1] = transformed_input_channel.dims()[i + 1] + padding_diff[i]; } if (compute_format == paddle::platform::DataLayout::kNCHW) { input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } else { input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i]; } } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_input); const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { funcs::PadFunction<Context, T, 5>(ctx, input_pad, 
transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_input.ShareDataWith(transformed_input_channel); if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); const T* filter_data = transformed_filter_channel.data<T>(); // ------------------- cudnn descriptors --------------------- paddle::operators::ConvArgs args{&transformed_input, &transformed_filter_channel, &transformed_output, strides, padding_common, dilations, dtype}; auto handle = ctx.cudnn_handle(); auto workspace_handle = ctx.cudnn_workspace_handle(); paddle::platform::DataLayout layout = compute_format == paddle::platform::DataLayout::kNHWC ? paddle::platform::DataLayout::kNHWC : paddle::platform::DataLayout::kNCHW; if (transformed_input.dims().size() == 5) { layout = compute_format == paddle::platform::DataLayout::kNHWC ? paddle::platform::DataLayout::kNDHWC : paddle::platform::DataLayout::kNCDHW; } auto layout_format = paddle::platform::GetCudnnTensorFormat(layout); args.handle = handle; #ifdef PADDLE_WITH_HIP // MIOPEN need to set groups in cdesc in miopen_desc.h args.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), groups); #else args.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn()); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // cudnn 7 can support groups, no need to do it manually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnSetConvolutionGroupCount( args.cdesc.desc(), groups)); groups = 1; #endif #ifdef PADDLE_WITH_HIP // MIOPEN do not set groups in wdesc after set groups in cdesc groups = 1; #endif args.idesc.set(transformed_input, layout_format); args.wdesc.set(transformed_filter_channel, layout_format, groups); args.odesc.set(transformed_output, layout_format); int i_n, i_c, i_d, i_h, i_w; int o_n, o_c, o_d, o_h, o_w; if (compute_format == paddle::platform::DataLayout::kNHWC) { paddle::operators::GetNCDHW(transformed_input.dims(), paddle::platform::DataLayout::kNHWC, &i_n, &i_c, &i_d, &i_h, &i_w); paddle::operators::GetNCDHW(transformed_output.dims(), paddle::platform::DataLayout::kNHWC, &o_n, &o_c, &o_d, &o_h, &o_w); } else { paddle::operators::GetNCDHW(transformed_input.dims(), paddle::platform::DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); paddle::operators::GetNCDHW(transformed_output.dims(), paddle::platform::DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); } int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = transformed_filter_channel.numel() / groups; // ------------------- cudnn conv workspace --------------------- size_t workspace_size = 0; // final workspace to allocate. 
// ------------------- cudnn conv algorithm --------------------- #ifdef PADDLE_WITH_HIP paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result; using search = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search::GetWorkspaceSize(args); fwd_result.algo = search::Find<T>( args, exhaustive_search, deterministic, workspace_size, ctx); #else paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result; using search = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result = search::Find<T>(args, exhaustive_search, deterministic, ctx); workspace_size = search::GetWorkspaceSize(args, fwd_result.algo); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // when groups > 1, SearchAlgorithm find algo is CUDNN_CONVOLUTION_\ // FWD_ALGO_WINOGRAD_NONFUSED, but this kind of algorithm is unstable // in forward computation, so change the algorithm to CUDNN_CONVOLUTION_\ // FWD_ALGO_IMPLICIT_GEMM manually. if (groups > 1) { fwd_result.algo = static_cast<cudnnConvolutionFwdAlgo_t>(0); } #endif // ------------------- cudnn conv forward --------------------- paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f; // VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto"); #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args.idesc.desc(), input_data, args.wdesc.desc(), filter_data, args.cdesc.desc(), fwd_result.algo, &beta, args.odesc.desc(), output_data, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args.idesc.desc(), input_data + i * group_offset_in, args.wdesc.desc(), filter_data + i * group_offset_filter, args.cdesc.desc(), fwd_result.algo, workspace_ptr, workspace_size, &beta, args.odesc.desc(), output_data + i * group_offset_out)); }, workspace_size); } #endif if (channel_last && compute_format == paddle::platform::DataLayout::kNCHW) { TransToChannelLast<Context, T>(ctx, &transformed_output, output); } } template <typename T, typename Context> void Conv3DCudnnKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, DenseTensor* out) { ConvCudnnKernel<T>(dev_ctx, input, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, out); } template <typename T, typename Context> void DepthwiseConvCudnnKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, DenseTensor* out) { ConvCudnnKernel<T>(dev_ctx, input, filter, strides, paddings, padding_algorithm, groups, dilations, 
data_format, use_addto, workspace_size_MB, exhaustive_search, out); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d, GPUDNN, ALL_LAYOUT, phi::ConvCudnnKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d, GPUDNN, ALL_LAYOUT, phi::DepthwiseConvCudnnKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d, GPUDNN, ALL_LAYOUT, phi::ConvCudnnKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} #else PD_REGISTER_KERNEL(conv2d, GPUDNN, ALL_LAYOUT, phi::ConvCudnnKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnKernel, float, double, phi::dtype::float16) {} #endif #endif // todo register bfloat16
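One part of the kernel above that is easier to follow in isolation is the handling of asymmetric padding: the convolution descriptor only accepts a single pad value per spatial dimension, so the code pads the input tensor explicitly by the difference and hands the shared minimum to the descriptor. A minimal sketch of that arithmetic in plain C++; the struct and function names are local to this example:

#include <algorithm>
#include <cstdlib>
#include <vector>

// Split per-dimension (begin, end) paddings into a symmetric part for the
// conv descriptor and an explicit amount to pad onto the input tensor.
struct PaddingSplit {
  std::vector<int> common;        // symmetric padding given to the descriptor
  std::vector<int> explicit_pad;  // extra rows/cols added to the input
};

PaddingSplit split_padding(const std::vector<int>& paddings /* size 2*D */) {
  const size_t D = paddings.size() / 2;
  PaddingSplit out{std::vector<int>(D), std::vector<int>(D)};
  for (size_t i = 0; i < D; ++i) {
    int begin = paddings[2 * i], end = paddings[2 * i + 1];
    out.common[i] = std::min(begin, end);         // what both sides share
    out.explicit_pad[i] = std::abs(begin - end);  // padded onto the tensor
  }
  return out;
}

// Example: paddings = {1, 2} (top = 1, bottom = 2) gives common = 1 and
// explicit_pad = 1, so the input grows by one row and the descriptor uses 1.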
df8d373c0a64149dd809ce781df8660185fd64e4.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/conv_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" namespace phi { template <typename T, typename Context> void ConvCudnnKernel(const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* output) { ctx.template Alloc<T>(output); std::vector<int> paddings = paddings_t; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); auto dtype = paddle::platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP // HIP MIOPEN ONLY SUPPORT NCHW format auto compute_format = paddle::platform::DataLayout::kNCHW; #else // Tensor Core introduced from Volta GPUs supports more faster conv op // with FP16 in NHWC data format. const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(ctx); // We will only do data format conversion from NHWC to NCHW. // cudnn will convert NCHW to NHWC automatically on Tensor Core. auto compute_format = compute_in_nhwc && channel_last ? paddle::platform::DataLayout::kNHWC : paddle::platform::DataLayout::kNCHW; #endif VLOG(3) << "Compute ConvOp with cuDNN:" << " data_format=" << data_format << " compute_format=" << (compute_format == paddle::platform::DataLayout::kNHWC ? 
"NHWC" : "NCHW"); // ------------ transformed tensor ----------- DenseTensor transformed_input_channel(input.type()); DenseTensor transformed_output(output->type()); DenseTensor transformed_filter_channel(filter.type()); T* output_data = nullptr; if (channel_last && compute_format == paddle::platform::DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<Context, T>(ctx, &input, &transformed_input_channel); TransToChannelFirst<Context, T>(ctx, &input, &transformed_input_channel); ResizeToChannelFirst<Context, T>(ctx, output, &transformed_output); } else { transformed_input_channel.ShareDataWith(input); transformed_output.ShareDataWith(*output); } if (compute_format == paddle::platform::DataLayout::kNHWC) { VLOG(3) << "Transform filter tensor from NCHW to NHWC."; ResizeToChannelLast<Context, T>(ctx, &filter, &transformed_filter_channel); TransToChannelLast<Context, T>(ctx, &filter, &transformed_filter_channel); } else { transformed_filter_channel.ShareDataWith(filter); } output_data = transformed_output.data<T>(); // update padding and dilation auto in_dims = transformed_input_channel.dims(); auto filter_dims = transformed_filter_channel.dims(); DDim in_data_dims; DDim filter_data_dims; if (compute_format == paddle::platform::DataLayout::kNCHW) { in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); } else { in_data_dims = slice_ddim(in_dims, 1, in_dims.size() - 1); filter_data_dims = slice_ddim(filter_dims, 1, filter_dims.size() - 1); } std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_input; std::vector<int> padding_common(data_dim, 0); if (!is_sys_pad) { std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_input_channel.dims()[0]; if (compute_format == paddle::platform::DataLayout::kNCHW) { new_input_shape_vec[1] = transformed_input_channel.dims()[1]; } else { new_input_shape_vec[data_dim + 1] = transformed_input_channel.dims()[data_dim + 1]; } std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0); for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); if (compute_format == paddle::platform::DataLayout::kNCHW) { new_input_shape_vec[i + 2] = transformed_input_channel.dims()[i + 2] + padding_diff[i]; } else { new_input_shape_vec[i + 1] = transformed_input_channel.dims()[i + 1] + padding_diff[i]; } if (compute_format == paddle::platform::DataLayout::kNCHW) { input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } else { input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i]; } } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_input.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_input); const int rank = transformed_input_channel.dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_input_channel, pad_value, &transformed_input); } break; case 5: { funcs::PadFunction<Context, T, 5>(ctx, 
input_pad, transformed_input_channel, pad_value, &transformed_input); } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_input.ShareDataWith(transformed_input_channel); if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* input_data = transformed_input.data<T>(); const T* filter_data = transformed_filter_channel.data<T>(); // ------------------- cudnn descriptors --------------------- paddle::operators::ConvArgs args{&transformed_input, &transformed_filter_channel, &transformed_output, strides, padding_common, dilations, dtype}; auto handle = ctx.cudnn_handle(); auto workspace_handle = ctx.cudnn_workspace_handle(); paddle::platform::DataLayout layout = compute_format == paddle::platform::DataLayout::kNHWC ? paddle::platform::DataLayout::kNHWC : paddle::platform::DataLayout::kNCHW; if (transformed_input.dims().size() == 5) { layout = compute_format == paddle::platform::DataLayout::kNHWC ? paddle::platform::DataLayout::kNDHWC : paddle::platform::DataLayout::kNCDHW; } auto layout_format = paddle::platform::GetCudnnTensorFormat(layout); args.handle = handle; #ifdef PADDLE_WITH_HIP // MIOPEN need to set groups in cdesc in miopen_desc.h args.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), groups); #else args.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn()); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // cudnn 7 can support groups, no need to do it manually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnSetConvolutionGroupCount( args.cdesc.desc(), groups)); groups = 1; #endif #ifdef PADDLE_WITH_HIP // MIOPEN do not set groups in wdesc after set groups in cdesc groups = 1; #endif args.idesc.set(transformed_input, layout_format); args.wdesc.set(transformed_filter_channel, layout_format, groups); args.odesc.set(transformed_output, layout_format); int i_n, i_c, i_d, i_h, i_w; int o_n, o_c, o_d, o_h, o_w; if (compute_format == paddle::platform::DataLayout::kNHWC) { paddle::operators::GetNCDHW(transformed_input.dims(), paddle::platform::DataLayout::kNHWC, &i_n, &i_c, &i_d, &i_h, &i_w); paddle::operators::GetNCDHW(transformed_output.dims(), paddle::platform::DataLayout::kNHWC, &o_n, &o_c, &o_d, &o_h, &o_w); } else { paddle::operators::GetNCDHW(transformed_input.dims(), paddle::platform::DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); paddle::operators::GetNCDHW(transformed_output.dims(), paddle::platform::DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); } int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = transformed_filter_channel.numel() / groups; // ------------------- cudnn conv workspace --------------------- size_t workspace_size = 0; // final workspace to allocate. 
// ------------------- cudnn conv algorithm --------------------- #ifdef PADDLE_WITH_HIP paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result; using search = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search::GetWorkspaceSize(args); fwd_result.algo = search::Find<T>( args, exhaustive_search, deterministic, workspace_size, ctx); #else paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result; using search = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result = search::Find<T>(args, exhaustive_search, deterministic, ctx); workspace_size = search::GetWorkspaceSize(args, fwd_result.algo); #endif #if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1) // when groups > 1, SearchAlgorithm find algo is CUDNN_CONVOLUTION_\ // FWD_ALGO_WINOGRAD_NONFUSED, but this kind of algorithm is unstable // in forward computation, so change the algorithm to CUDNN_CONVOLUTION_\ // FWD_ALGO_IMPLICIT_GEMM manually. if (groups > 1) { fwd_result.algo = static_cast<cudnnConvolutionFwdAlgo_t>(0); } #endif // ------------------- cudnn conv forward --------------------- paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f; // VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto"); #ifdef PADDLE_WITH_HIP workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args.idesc.desc(), input_data, args.wdesc.desc(), filter_data, args.cdesc.desc(), fwd_result.algo, &beta, args.odesc.desc(), output_data, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { workspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args.idesc.desc(), input_data + i * group_offset_in, args.wdesc.desc(), filter_data + i * group_offset_filter, args.cdesc.desc(), fwd_result.algo, workspace_ptr, workspace_size, &beta, args.odesc.desc(), output_data + i * group_offset_out)); }, workspace_size); } #endif if (channel_last && compute_format == paddle::platform::DataLayout::kNCHW) { TransToChannelLast<Context, T>(ctx, &transformed_output, output); } } template <typename T, typename Context> void Conv3DCudnnKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, DenseTensor* out) { ConvCudnnKernel<T>(dev_ctx, input, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, out); } template <typename T, typename Context> void DepthwiseConvCudnnKernel(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, DenseTensor* out) { ConvCudnnKernel<T>(dev_ctx, input, filter, strides, paddings, padding_algorithm, groups, dilations, 
data_format, use_addto, workspace_size_MB, exhaustive_search, out); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d, GPUDNN, ALL_LAYOUT, phi::ConvCudnnKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d, GPUDNN, ALL_LAYOUT, phi::DepthwiseConvCudnnKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d, GPUDNN, ALL_LAYOUT, phi::ConvCudnnKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} #else PD_REGISTER_KERNEL(conv2d, GPUDNN, ALL_LAYOUT, phi::ConvCudnnKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnKernel, float, double, phi::dtype::float16) {} #endif #endif // todo register bfloat16
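The .cu version repeats the same kernel; the loop over groups near the end is the part that is easiest to misread. On cuDNN 7 and newer the group count is passed to the descriptor and the local groups variable is reset to 1, so the loop body runs once; the per-group pointer offsets only matter in the general case, and they are plain strides into the NCHW buffers. A small illustration with made-up shapes:

#include <cstdio>

int main() {
  // Hypothetical shapes: C_in = 32, H = W = 28, C_out = 64, 3x3 filter, 4 groups.
  int i_c = 32, i_d = 1, i_h = 28, i_w = 28;
  int o_c = 64, o_d = 1, o_h = 28, o_w = 28;
  int groups = 4;
  long filter_numel = (long)o_c * (i_c / groups) * 3 * 3;

  // Mirrors the offsets used when calling the forward convolution per group.
  long group_offset_in     = i_c / groups * i_h * i_w * i_d;  // 8 * 28 * 28
  long group_offset_out    = o_c / groups * o_h * o_w * o_d;  // 16 * 28 * 28
  long group_offset_filter = filter_numel / groups;

  printf("in %ld  out %ld  filter %ld\n",
         group_offset_in, group_offset_out, group_offset_filter);
  return 0;
}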
0c49208dda42a682eaec1f6fc54267921a4e4453.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

#define N 100
#define IT 3

__global__ void JacobiIteration(int n, float *a, float *b, float *x, float *x_result){
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){
    float sigma = 0;
    for(int j = 0; j < n; j++){
      if(j != i){
        sigma += a[i + j * n] * x[j];
      }
    }
    x_result[i] = (b[i] - sigma) / a[i + i*n];
  }
}

__global__ void initAMatrix(int n, float *a){
  int i;
  for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x){
    for ( i = 0; i < n; i++ ){
      if ( j == i - 1 ){
        a[j+i*n] = -1.0;
      }
      else if ( j == i ){
        a[j+i*n] = 2.0;
      }
      else if ( j == i + 1 ){
        a[j+i*n] = -1.0;
      }
      else{
        a[j+i*n] = 0.0;
      }
    }
  }
}

__global__ void copy(int n, float *a, float *b){
  for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x){
    a[j] = b[j];
  }
}

void swap(float* &a, float* &b){
  float *temp = a;
  a = b;
  b = temp;
}

int main(){
  float *a, *b, *x, *x_result;

  // alloc
  hipMallocManaged(&a, N*N*sizeof(float));
  hipMallocManaged(&b, N*sizeof(float));
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&x_result, N*sizeof(float));

  // init
  for (int i = 0; i < N; i++ ) {
    b[i] = 3.0;
  }
  b[N-1] = ( float ) ( N + 1 );
  for ( int i = 0; i < N; i++ ) {
    x[i] = 0.0;
  }
  hipLaunchKernelGGL(( initAMatrix), dim3(32), dim3(32), 0, 0, N, a);

  for ( int it = 0; it < IT; it++ ){
    hipLaunchKernelGGL(( JacobiIteration), dim3(32), dim3(32), 0, 0, N, a, b, x, x_result);
    hipDeviceSynchronize();
    swap(x, x_result);
  }

  for(int i = 0; i < N; i++){
    printf("%f ", x[i]);
  }
  return 0;
}
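A quick way to sanity-check the kernels above is to run the same sweeps on the host: the system is tridiagonal with 2 on the diagonal and -1 on the off-diagonals, b[i] = 3 everywhere except b[N-1] = N + 1, and x starts at zero. A host-only reference, assuming the same matrix that initAMatrix builds:

#include <cstdio>
#include <vector>

int main() {
  const int n = 100, iters = 3;  // matches N and IT in the file above
  std::vector<float> x(n, 0.0f), x_new(n), b(n, 3.0f);
  b[n - 1] = (float)(n + 1);

  for (int it = 0; it < iters; ++it) {
    for (int i = 0; i < n; ++i) {
      float sigma = 0.0f;
      if (i > 0)     sigma += -1.0f * x[i - 1];   // sub-diagonal entry
      if (i < n - 1) sigma += -1.0f * x[i + 1];   // super-diagonal entry
      x_new[i] = (b[i] - sigma) / 2.0f;           // diagonal entry is 2
    }
    x.swap(x_new);
  }
  for (int i = 0; i < n; ++i) printf("%f ", x[i]);
  printf("\n");
  return 0;
}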
0c49208dda42a682eaec1f6fc54267921a4e4453.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #define N 100 #define IT 3 __global__ void JacobiIteration(int n, float *a, float *b, float *x, float*x_result){ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){ float sigma = 0; for(int j = 0 ; j<n; j++){ if(j!=i){ sigma += a[i + j * n]*x[j]; } } x_result[i] = (b[i] - sigma)/a[i + i*n]; } } __global__ void initAMatrix(int n, float*a){ int i; for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x){ for ( i = 0; i < n; i++ ){ if ( j == i - 1 ){ a[j+i*n] = -1.0; } else if ( j == i ){ a[j+i*n] = 2.0; } else if ( j == i + 1 ){ a[j+i*n] = -1.0; } else{ a[j+i*n] = 0.0; } } } } __global__ void copy(int n, float*a, float *b){ for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x){ a[j] = b[j]; } } void swap(float* &a, float* &b){ float *temp = a; a = b; b = temp; } int main(){ float *a, *b, *x, *x_result; // alloc cudaMallocManaged(&a, N*N*sizeof(float)); cudaMallocManaged(&b, N*sizeof(float)); cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&x_result, N*sizeof(float)); // init for (int i = 0; i < N; i++ ) { b[i] = 3.0; } b[N-1] = ( float ) ( N + 1 ); for ( int i = 0; i < N; i++ ) { x[i] = 0.0; } initAMatrix<<<32, 32>>>(N, a); for ( int it = 0; it < IT; it++ ){ JacobiIteration<<<32, 32>>>(N, a, b, x, x_result); cudaDeviceSynchronize(); swap(x, x_result); } for(int i = 0; i < N; i++){ printf("%f ",x[i]); } return 0; }
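The pair above is an almost mechanical translation: cudaMallocManaged and cudaDeviceSynchronize map one-to-one to hipMallocManaged and hipDeviceSynchronize, and the triple-chevron launch becomes hipLaunchKernelGGL. A minimal sketch of that launch mapping with a toy kernel (the scale kernel is illustrative, not taken from the files above):

// A trivial grid-stride kernel, illustrative only.
__global__ void scale(int n, float a, float* x) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    x[i] *= a;
}

// CUDA source:    scale<<<32, 256>>>(n, 2.0f, d_x);
// hipify output:  hipLaunchKernelGGL(scale, dim3(32), dim3(256), 0, 0, n, 2.0f, d_x);
// The two extra arguments (0, 0) are the dynamic shared-memory size and the stream,
// which the triple-chevron form leaves at their defaults.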
f3b4395730d94d6392d7d1aab95dad26d84b12d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** File: jacobi2D.cu Author: Terrence Alsup Date: April 15, 2019 HPC 2019 : HW 4 Solve the Equation -Lu = f where L is the Laplacian using the Jacobi method. u = 0 on the boundary f = 1 **/ #include <algorithm> #include <stdio.h> #include <omp.h> #include <string> #include <math.h> #define BLOCK_SIZE 1024 /** The reduction kernel to add a vector of numbers in a block. **/ __global__ void reduction_kernel(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } /** The kernel to compute the Jacobi update at a site. **/ __global__ void jacobi_kernel(double *unew, const double *u, const double *f, long N, double h2) { // The index of the site on the grid we are updating. int idx = (blockIdx.x) * blockDim.x + threadIdx.x; long i = idx / (N+2); // The row. long j = idx % (N+2); // The column. double temp; if (i > 0 && i < (N+2) && j > 0 && j < (N+2)) { // Only update the interior points. temp = h2*f[idx]; temp += u[(i-1)*(N+2) + j]; temp += u[i*(N+2) + j - 1]; temp += u[(i+1)*(N+2) + j]; temp += u[i*(N+2) + j + 1]; unew[idx] = 0.25 * temp; } else { // Boundary points are set to 0 still. unew[idx] = 0; } } /** Compute the residual |-Du - f|_2^2 for a block of threads. **/ __global__ void residual_kernel(double *res, const double *u, const double *f, long N, double h2) { __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; long i = idx / (N+2); // The row. long j = idx % (N+2); // The column. double Du; if (i > 0 && i < (N+2) && j > 0 && j < (N+2)) { // Discrete Laplacian of u. Du = -4 * u[idx]; Du += u[(i-1)*(N+2) + j]; Du += u[(i+1)*(N+2) + j]; Du += u[i*(N+2) + j - 1]; Du += u[i*(N+2) + j + 1]; Du /= h2; smem[threadIdx.x] = (Du + f[idx]) * (Du + f[idx]); // Residual at the point. 
} else { smem[threadIdx.x] = 0; } __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) res[blockIdx.x] = smem[0] + smem[1]; } } int main(int argc, char** argv) { // Number of interior points along each dimension. long N = (1UL<<7); // N = 2^7 // Arrays for the solution u and RHS f. double *u, *f; // Allocate space on the CPU. // Notice that we include the boundaries as well. hipHostMalloc((void**)&u, (N+2) * (N+2) * sizeof(double)); hipHostMalloc((void**)&f, (N+2) * (N+2) * sizeof(double)); // Initialize the initial condition for u and f, done in parallel. #pragma omp parallel for for (long i = 0; i < (N+2)*(N+2); i++) { u[i] = 0; f[i] = 1; } // Allocate space on the GPU to calculate the solutions. double *u_d, *f_d, *unew_d; hipMalloc(&u_d, (N+2) * (N+2) * sizeof(double)); hipMalloc(&f_d, (N+2) * (N+2) * sizeof(double)); hipMalloc(&unew_d, (N+2) * (N+2) * sizeof(double)); // Transfer data to the GPU. hipMemcpyAsync(u_d, u, (N+2)*(N+2)*sizeof(double), hipMemcpyHostToDevice); hipMemcpyAsync(f_d, f, (N+2)*(N+2)*sizeof(double), hipMemcpyHostToDevice); hipMemcpyAsync(unew_d, u, (N+2)*(N+2)*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); // Now do the Jacobi iterations. long maxiter = 1e4; double tolerance = 1e-1; // Absolute tolerance double residual = 2 * tolerance; // Residual |Du-f|_2 double h2 = 1.0/((N+1) * (N+1)); // Grid size h^2. // Nb is number of blocks to compute with for Jacobi on the GPU. long Nb = ((N+2)*(N+2) + BLOCK_SIZE - 1) / (BLOCK_SIZE); // Extra memory buffer for reduction across thread-blocks. double *y_d; long N_work = 1; for (long i = Nb; i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i; hipMalloc(&y_d, N_work*sizeof(double)); double start = omp_get_wtime(); // Get the starting time. long iter = 1; while (iter <= maxiter && residual > tolerance) { // Do the Jacobi update on the GPU. // Each block will compute the update of a single entry on the grid. hipLaunchKernelGGL(( jacobi_kernel), dim3(Nb), dim3(BLOCK_SIZE), 0, 0, unew_d, u_d, f_d, N, h2); // First synchronize all threads and then update u on the GPU. hipDeviceSynchronize(); hipMemcpy(u_d, unew_d, (N+2)*(N+2)*sizeof(double), hipMemcpyDeviceToDevice); // Compute the residual on the GPU. double* res_d = y_d; // Result from each block. hipLaunchKernelGGL(( residual_kernel), dim3(Nb), dim3(BLOCK_SIZE), 0, 0, res_d, u_d, f_d, N, h2); // Now add the residuals from all of the blocks. long Nbres = Nb; while (Nbres > 1) { long temp = Nbres; Nbres = (Nbres+BLOCK_SIZE-1)/(BLOCK_SIZE); hipLaunchKernelGGL(( reduction_kernel), dim3(Nbres),dim3(BLOCK_SIZE), 0, 0, res_d + Nbres, res_d, temp); res_d += Nbres; } // Transfer the computation of the residual to the CPU. hipMemcpyAsync(&residual, res_d, 1*sizeof(double), hipMemcpyDeviceToHost); // Print what the residual was every 500 iterations. 
if (iter % 500 == 0) { printf("Residual at Iteration %d = %f\n", iter, residual); } iter++; } double elapsed = omp_get_wtime() - start; if (residual < tolerance) { printf("\nConverged in %d iterations with residual = %f\n", iter, residual); } else { printf("\nFailed to converge in %d iterations. Final residual = %f\n", maxiter, residual); } printf("Total wall clock time (s) = %f\n", elapsed); // Now free the memory that was allocated on the CPU and GPU. hipFree(u_d); hipFree(f_d); hipFree(unew_d); hipFree(y_d); hipHostFree(u); hipHostFree(f); return 0; }
f3b4395730d94d6392d7d1aab95dad26d84b12d3.cu
/** File: jacobi2D.cu Author: Terrence Alsup Date: April 15, 2019 HPC 2019 : HW 4 Solve the Equation -Lu = f where L is the Laplacian using the Jacobi method. u = 0 on the boundary f = 1 **/ #include <algorithm> #include <stdio.h> #include <omp.h> #include <string> #include <math.h> #define BLOCK_SIZE 1024 /** The reduction kernel to add a vector of numbers in a block. **/ __global__ void reduction_kernel(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } /** The kernel to compute the Jacobi update at a site. **/ __global__ void jacobi_kernel(double *unew, const double *u, const double *f, long N, double h2) { // The index of the site on the grid we are updating. int idx = (blockIdx.x) * blockDim.x + threadIdx.x; long i = idx / (N+2); // The row. long j = idx % (N+2); // The column. double temp; if (i > 0 && i < (N+2) && j > 0 && j < (N+2)) { // Only update the interior points. temp = h2*f[idx]; temp += u[(i-1)*(N+2) + j]; temp += u[i*(N+2) + j - 1]; temp += u[(i+1)*(N+2) + j]; temp += u[i*(N+2) + j + 1]; unew[idx] = 0.25 * temp; } else { // Boundary points are set to 0 still. unew[idx] = 0; } } /** Compute the residual |-Du - f|_2^2 for a block of threads. **/ __global__ void residual_kernel(double *res, const double *u, const double *f, long N, double h2) { __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; long i = idx / (N+2); // The row. long j = idx % (N+2); // The column. double Du; if (i > 0 && i < (N+2) && j > 0 && j < (N+2)) { // Discrete Laplacian of u. Du = -4 * u[idx]; Du += u[(i-1)*(N+2) + j]; Du += u[(i+1)*(N+2) + j]; Du += u[i*(N+2) + j - 1]; Du += u[i*(N+2) + j + 1]; Du /= h2; smem[threadIdx.x] = (Du + f[idx]) * (Du + f[idx]); // Residual at the point. } else { smem[threadIdx.x] = 0; } __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) res[blockIdx.x] = smem[0] + smem[1]; } } int main(int argc, char** argv) { // Number of interior points along each dimension. long N = (1UL<<7); // N = 2^7 // Arrays for the solution u and RHS f. 
double *u, *f; // Allocate space on the CPU. // Notice that we include the boundaries as well. cudaMallocHost((void**)&u, (N+2) * (N+2) * sizeof(double)); cudaMallocHost((void**)&f, (N+2) * (N+2) * sizeof(double)); // Initialize the initial condition for u and f, done in parallel. #pragma omp parallel for for (long i = 0; i < (N+2)*(N+2); i++) { u[i] = 0; f[i] = 1; } // Allocate space on the GPU to calculate the solutions. double *u_d, *f_d, *unew_d; cudaMalloc(&u_d, (N+2) * (N+2) * sizeof(double)); cudaMalloc(&f_d, (N+2) * (N+2) * sizeof(double)); cudaMalloc(&unew_d, (N+2) * (N+2) * sizeof(double)); // Transfer data to the GPU. cudaMemcpyAsync(u_d, u, (N+2)*(N+2)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyAsync(f_d, f, (N+2)*(N+2)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyAsync(unew_d, u, (N+2)*(N+2)*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); // Now do the Jacobi iterations. long maxiter = 1e4; double tolerance = 1e-1; // Absolute tolerance double residual = 2 * tolerance; // Residual |Du-f|_2 double h2 = 1.0/((N+1) * (N+1)); // Grid size h^2. // Nb is number of blocks to compute with for Jacobi on the GPU. long Nb = ((N+2)*(N+2) + BLOCK_SIZE - 1) / (BLOCK_SIZE); // Extra memory buffer for reduction across thread-blocks. double *y_d; long N_work = 1; for (long i = Nb; i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i; cudaMalloc(&y_d, N_work*sizeof(double)); double start = omp_get_wtime(); // Get the starting time. long iter = 1; while (iter <= maxiter && residual > tolerance) { // Do the Jacobi update on the GPU. // Each block will compute the update of a single entry on the grid. jacobi_kernel<<<Nb, BLOCK_SIZE>>>(unew_d, u_d, f_d, N, h2); // First synchronize all threads and then update u on the GPU. cudaDeviceSynchronize(); cudaMemcpy(u_d, unew_d, (N+2)*(N+2)*sizeof(double), cudaMemcpyDeviceToDevice); // Compute the residual on the GPU. double* res_d = y_d; // Result from each block. residual_kernel<<<Nb, BLOCK_SIZE>>>(res_d, u_d, f_d, N, h2); // Now add the residuals from all of the blocks. long Nbres = Nb; while (Nbres > 1) { long temp = Nbres; Nbres = (Nbres+BLOCK_SIZE-1)/(BLOCK_SIZE); reduction_kernel<<<Nbres,BLOCK_SIZE>>>(res_d + Nbres, res_d, temp); res_d += Nbres; } // Transfer the computation of the residual to the CPU. cudaMemcpyAsync(&residual, res_d, 1*sizeof(double), cudaMemcpyDeviceToHost); // Print what the residual was every 500 iterations. if (iter % 500 == 0) { printf("Residual at Iteration %d = %f\n", iter, residual); } iter++; } double elapsed = omp_get_wtime() - start; if (residual < tolerance) { printf("\nConverged in %d iterations with residual = %f\n", iter, residual); } else { printf("\nFailed to converge in %d iterations. Final residual = %f\n", maxiter, residual); } printf("Total wall clock time (s) = %f\n", elapsed); // Now free the memory that was allocated on the CPU and GPU. cudaFree(u_d); cudaFree(f_d); cudaFree(unew_d); cudaFree(y_d); cudaFreeHost(u); cudaFreeHost(f); return 0; }
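Both versions above implement the same 5-point Jacobi sweep; a single-threaded host reference of the interior update may make the stencil easier to read (a sketch, assuming the same row-major (N+2) x (N+2) layout with a one-cell boundary ring and zero Dirichlet boundary):

// One Jacobi sweep over the interior points, mirroring the update in jacobi_kernel.
void jacobi_sweep_host(long N, double h2, const double* u, const double* f, double* unew) {
  for (long i = 1; i <= N; ++i) {
    for (long j = 1; j <= N; ++j) {
      long idx = i * (N + 2) + j;
      unew[idx] = 0.25 * (h2 * f[idx]
                          + u[idx - (N + 2)]   // neighbor above
                          + u[idx + (N + 2)]   // neighbor below
                          + u[idx - 1]         // left neighbor
                          + u[idx + 1]);       // right neighbor
    }
  }
}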
9ea3b36ab38a4d41ea467186ba412230197293be.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "TH/THHalf.h" #include "THHHalfAutoNumerics.cuh" #include <THH/THHApply.cuh> template <typename T> struct sqrtupdateOutput_functor { const T bias; sqrtupdateOutput_functor(T bias_) : bias(bias_) {} __device__ void operator()(T *output, const T *input) const { *output = sqrt(*input + bias); } }; template <typename T> struct sqrtupdateGradInput_functor { sqrtupdateGradInput_functor() {} __device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const { *gradInput = (THCNumerics<T>::eq(*output,ScalarConvert<float, T>::to(0.0f))) ? ScalarConvert<float, T>::to(0.0f) : ((ScalarConvert<float, T>::to(0.5f) * *gradOutput) / *output); } }; #include "generic/Sqrt.cu" #include "THHGenerateFloatTypes.h"
9ea3b36ab38a4d41ea467186ba412230197293be.cu
#include "THCUNN.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> template <typename T> struct sqrtupdateOutput_functor { const T bias; sqrtupdateOutput_functor(T bias_) : bias(bias_) {} __device__ void operator()(T *output, const T *input) const { *output = sqrt(*input + bias); } }; template <typename T> struct sqrtupdateGradInput_functor { sqrtupdateGradInput_functor() {} __device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const { *gradInput = (THCNumerics<T>::eq(*output,ScalarConvert<float, T>::to(0.0f))) ? ScalarConvert<float, T>::to(0.0f) : ((ScalarConvert<float, T>::to(0.5f) * *gradOutput) / *output); } }; #include "generic/Sqrt.cu" #include "THCGenerateFloatTypes.h"
178eae05c97d7382a8cf2f56b33a894c0afe5e9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(512) __global__ void upsample_linear1d_out_frame( const int n, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor64<scalar_t, 3> idata, PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][w1]; odata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p]; odata[n][c][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(512) __global__ void upsample_linear1d_out_frame_backward( const int n, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor64<scalar_t, 3> idata, const PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][w1]; idata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][w2]; gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); gpuAtomicAddNoReturn( &idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val)); } } } } static void upsample_linear1d_out_cuda_template( const Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); int output_width = output_size[0]; int input_width = input.size(2); output.zero_(); AT_ASSERT(input_width > 0 && output_width > 0); const int num_kernels = output_width; const int num_threads = 512; //at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_linear1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 3>(); auto odata = output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); hipLaunchKernelGGL(( upsample_linear1d_out_frame<scalar_t, accscalar_t>) , dim3(ceil_div(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, idata, odata); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } static void upsample_linear1d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); int output_width = output_size[0]; int input_width = input_size[2]; Tensor grad_output = grad_output_.contiguous(); grad_input.zero_(); const int num_kernels = output_width; const int num_threads = 512; //at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 3>(); auto odata = grad_output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); hipLaunchKernelGGL(( upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>) , dim3(ceil_div(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rwidth, align_corners, idata, odata); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_linear1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales, const Tensor& output ) { upsample_linear1d_out_cuda_template(output, input, output_size, align_corners, scales); } TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales, const Tensor& grad_input ) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because 
of atomicAdd usage globalContext().alertNotDeterministic("upsample_linear1d_backward_out_cuda"); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); } } // namespace native } // namespace at
178eae05c97d7382a8cf2f56b33a894c0afe5e9d.cu
// Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(512) __global__ void upsample_linear1d_out_frame( const int n, const accscalar_t rwidth, const bool align_corners, const PackedTensorAccessor64<scalar_t, 3> idata, PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = idata[n][c][w1]; odata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const accscalar_t val = w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p]; odata[n][c][w2] = static_cast<scalar_t>(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(512) __global__ void upsample_linear1d_out_frame_backward( const int n, const accscalar_t rwidth, const bool align_corners, PackedTensorAccessor64<scalar_t, 3> idata, const PackedTensorAccessor64<scalar_t, 3> odata) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = idata.size(0); const int channels = idata.size(1); const int width1 = idata.size(2); const int width2 = odata.size(2); if (index < n) { const int w2 = index % width2; // special case: just copy if (width1 == width2) { const int w1 = w2; for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t val = odata[n][c][w1]; idata[n][c][w2] = val; } } return; } // const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>( rwidth, w2, align_corners, /*cubic=*/false); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const accscalar_t w1lambda = w1r - w1; const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda; // for (int n = 0; n < batchsize; n++) { for (int c = 0; c < channels; ++c) { const scalar_t d2val = odata[n][c][w2]; gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val)); gpuAtomicAddNoReturn( &idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val)); } } } } static void upsample_linear1d_out_cuda_template( const Tensor& output, const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales) { TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); int output_width = output_size[0]; int input_width = input.size(2); output.zero_(); AT_ASSERT(input_width > 0 && output_width > 0); const int num_kernels = output_width; const int num_threads = 512; //at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_linear1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.packed_accessor64<scalar_t, 3>(); auto odata = output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); upsample_linear1d_out_frame<scalar_t, accscalar_t> <<<ceil_div(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, idata, odata); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } static void upsample_linear1d_backward_out_cuda_template( const Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales) { TensorArg grad_output_arg{grad_output_, "grad_output_", 1}, grad_input_arg{grad_input, "grad_input", 2}; checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg}); int output_width = output_size[0]; int input_width = input_size[2]; Tensor grad_output = grad_output_.contiguous(); grad_input.zero_(); const int num_kernels = output_width; const int num_threads = 512; //at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.packed_accessor64<scalar_t, 3>(); auto odata = grad_output.packed_accessor64<scalar_t, 3>(); const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( input_width, output_width, align_corners, scales); upsample_linear1d_out_frame_backward<scalar_t, accscalar_t> <<<ceil_div(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rwidth, align_corners, idata, odata); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace TORCH_IMPL_FUNC(upsample_linear1d_out_cuda) ( const Tensor& input, IntArrayRef output_size, bool align_corners, c10::optional<double> scales, const Tensor& output ) { upsample_linear1d_out_cuda_template(output, input, output_size, align_corners, scales); } TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cuda) ( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales, const Tensor& grad_input ) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage 
globalContext().alertNotDeterministic("upsample_linear1d_backward_out_cuda"); upsample_linear1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size, align_corners, scales); } } // namespace native } // namespace at
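The forward kernel above reduces to a single linear blend per output column once the fractional source coordinate w1r has been computed; a minimal host-side sketch of that step (w1r is taken as given, so the align_corners / area_pixel_compute_source_index details are left out):

// Sample a 1D signal at fractional coordinate w1r by blending the two nearest
// input columns, clamping at the right edge the same way the kernel does.
float lerp_sample_1d(const float* in, int input_width, float w1r) {
  int   w1       = (int)w1r;                         // left source index
  int   w1p      = (w1 < input_width - 1) ? 1 : 0;   // 0 at the right edge
  float w1lambda = w1r - (float)w1;                  // weight of the right sample
  float w0lambda = 1.0f - w1lambda;                  // weight of the left sample
  return w0lambda * in[w1] + w1lambda * in[w1 + w1p];
}

The backward kernel scatters gradOutput back with the same two weights, which is why it needs gpuAtomicAddNoReturn: neighbouring output columns can map to the same input column.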
e819561eedb7ae565dcb05d72228c69979135baf.hip
// !!! This is a file automatically generated by hipify!!! /** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya * @date 2012-2015 * @copyright University of Pennsylvania & STUDENT */ #include "rasterize_min.h" #include <cmath> #include <cstdio> #include <hip/hip_runtime.h> #include <thrust/random.h> #include <util/checkCUDAError.h> #include "rasterizeTools.h" struct VertexIn { glm::vec3 pos; glm::vec3 nor; glm::vec3 col; // TODO (optional) add other vertex attributes (e.g. texture coordinates) }; struct VertexOut { // TODO glm::vec3 pos; glm::vec3 nor; glm::vec3 col; }; struct Triangle { VertexOut v[3]; }; struct FragmentIn { glm::vec3 color; glm::vec3 normal; float depth; }; struct FragmentOut { glm::vec3 color; }; static int width = 0; static int height = 0; static int *dev_bufIdx = NULL; // indices buffer static VertexIn *dev_bufVertex = NULL; // buffer of untransformed vertices static VertexOut *dev_shadedVertices = NULL; // buffer of transformed vertices. need? static Triangle *dev_primitives = NULL; // buffer of primitives static FragmentIn *dev_fragsIn = NULL; // buffer of unshaded fragments. static FragmentOut *dev_fragsOut = NULL; // buffer of shaded fragments. basically, pixels. need? static glm::vec3 *dev_framebuffer = NULL; // color frame buffer. need? static int bufIdxSize = 0; static int vertCount = 0; /** * Kernel that clears frags in */ __global__ void minClearFragsIn(int w, int h, FragmentIn *dev_fragsIn) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { dev_fragsIn[x + (y * w)].depth = 0x7ff0000000000000;// 0xfff0000000000000; dev_fragsIn[x + (y * w)].color = glm::vec3(0.0f, 0.0f, 0.0f); dev_fragsIn[x + (y * w)].normal = glm::vec3(0.1f, 0.1f, 0.1f); } } /** * Kernel that clears frags out */ __global__ void minClearFragsOut(int w, int h, FragmentOut *dev_fragsOut) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { dev_fragsOut[x + (y * w)].color = glm::vec3(0.0f, 0.0f, 0.0f); } } /** * Kernel that writes the image to the OpenGL PBO directly. */ __global__ void minSendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } // Writes fragment colors to the framebuffer __global__ void render(int w, int h, FragmentOut *frags, glm::vec3 *framebuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); // frameBuffer code assumes (0,0) is at the bottom left in pix coords, // but this code assumes it's at the top right. int frameBufferIndex = (w - 1 - x) + (h - 1 - y) * w; if (x < w && y < h) { framebuffer[index] = frags[frameBufferIndex].color; } } /** * Called once at the beginning of the program to allocate memory. 
*/ void minRasterizeInit(int w, int h) { width = w; height = h; hipFree(dev_fragsIn); hipMalloc(&dev_fragsIn, width * height * sizeof(FragmentIn)); hipMemset(dev_fragsIn, 0, width * height * sizeof(FragmentIn)); hipFree(dev_fragsOut); hipMalloc(&dev_fragsOut, width * height * sizeof(FragmentOut)); hipMemset(dev_fragsOut, 0, width * height * sizeof(FragmentOut)); hipFree(dev_framebuffer); hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3)); hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); checkCUDAError("rasterizeInit"); } /** * Set all of the buffers necessary for rasterization. */ void minRasterizeSetBuffers( int _bufIdxSize, int *bufIdx, int _vertCount, float *bufPos, float *bufNor, float *bufCol) { bufIdxSize = _bufIdxSize; vertCount = _vertCount; hipFree(dev_bufIdx); hipMalloc(&dev_bufIdx, bufIdxSize * sizeof(int)); hipMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), hipMemcpyHostToDevice); VertexIn *bufVertex = new VertexIn[_vertCount]; VertexOut *bufVertexOut = new VertexOut[_vertCount]; for (int i = 0; i < vertCount; i++) { int j = i * 3; bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]); bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]); bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]); } hipFree(dev_bufVertex); hipMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn)); hipMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), hipMemcpyHostToDevice); hipFree(dev_shadedVertices); hipMalloc(&dev_shadedVertices, vertCount * sizeof(VertexOut)); hipFree(dev_primitives); hipMalloc(&dev_primitives, bufIdxSize / 3 * sizeof(Triangle)); hipMemset(dev_primitives, 0, bufIdxSize / 3 * sizeof(Triangle)); delete bufVertex; delete bufVertexOut; checkCUDAError("rasterizeSetBuffers"); } // minimal vertex shader __global__ void minVertexShader(int vertCount, glm::mat4 tf, VertexIn *dev_verticesIn, VertexOut *dev_verticesOut) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < vertCount) { dev_verticesOut[i].pos = tfPoint(tf, dev_verticesIn[i].pos); dev_verticesOut[i].nor = dev_verticesIn[i].nor; glm::vec3 pos = dev_verticesOut[i].pos; // debug glm::vec3 untf = dev_verticesIn[i].pos; // debug dev_verticesOut[i].col = dev_verticesIn[i].col; } } // primitive assembly. 1D linear blocks expected __global__ void minPrimitiveAssembly(int numPrimitives, VertexOut *dev_vertices, Triangle *dev_primitives) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < numPrimitives) { dev_primitives[i].v[0] = dev_vertices[i * 3]; dev_primitives[i].v[1] = dev_vertices[i * 3 + 1]; dev_primitives[i].v[2] = dev_vertices[i * 3 + 2]; } } // scanline rasterization. 1D linear blocks expected __global__ void minScanlineRasterization(int w, int h, int numPrimitives, Triangle *dev_primitives, FragmentIn *dev_frags) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < numPrimitives) { // get the AABB of the triangle glm::vec3 v[3]; v[0] = dev_primitives[i].v[0].pos; v[1] = dev_primitives[i].v[1].pos; v[2] = dev_primitives[i].v[2].pos; AABB triangleBB = getAABBForTriangle(v); // triangle should have been "smooshed" to screen coordinates already. // walk and fill frags. 
float pixWidth = 2.0f / (float) w; // NDC goes from -1 to 1 in x and y float pixHeight = 2.0f / (float) h; int BBYmin = triangleBB.min.y * (h / 2); int BBYmax = triangleBB.max.y * (h / 2); int BBXmin = triangleBB.min.x * (w / 2); int BBXmax = triangleBB.max.x * (w / 2); // fake clip if (BBYmin < -h / 2) BBYmin = -h / 2; if (BBXmin < -w / 2) BBXmin = -w / 2; if (BBYmax > h / 2) BBYmax = h / 2; if (BBXmax > w / 2) BBXmax = w / 2; float normX = dev_primitives[i].v[0].nor.x; // debug float normY = dev_primitives[i].v[0].nor.y; // debug float normZ = dev_primitives[i].v[0].nor.z; // debug // //float depth0 = v[0].z; //float depth1 = v[1].z; //float depth2 = v[2].z; //float depthz = v[0].x; // scan over the AABB for (int y = BBYmin; y < BBYmax; y++) { for (int x = BBXmin; x < BBXmax; x++) { // compute x y coordinates of the center of "this fragment" //printf("%i %i\n", x, y); glm::vec2 fragCoord = glm::vec2(x * pixWidth + pixWidth * 0.5f, y * pixHeight + pixHeight * 0.5f); // check if it's in dev_primitives[i].v using bary glm::vec3 baryCoordinate = calculateBarycentricCoordinate(v, fragCoord); if (!isBarycentricCoordInBounds(baryCoordinate)) { continue; } // check depth using bary. the version in utils returns a negative z for some reason. float zDepth = -getZAtCoordinate(baryCoordinate, v); if (x == -10 && y == -10 && dev_primitives[i].v[0].nor.y < 0.0f) { // debug printf("zDepth of bottom face is %f\n", zDepth); } // we're pretending NDC is -1 to +1 along each axis // so a fragIndx(0,0) is at NDC -1 -1 // btw, going from NDC back to pixel coordinates: // I've flipped the drawing system, so now it assumes 0,0 is in the bottom left. int fragIndex = (x + (w / 2) - 1) + ((y + (h / 2) - 1) * w); // if all things pass ok, then insert into fragment. float peekDepth = dev_frags[fragIndex].depth; // debug bool closer = zDepth < peekDepth; // debug if (zDepth <= dev_frags[fragIndex].depth) { dev_frags[fragIndex].depth = zDepth; // interpolate color glm::vec3 interpColor = dev_primitives[i].v[0].col * baryCoordinate[0]; interpColor += dev_primitives[i].v[1].col * baryCoordinate[1]; interpColor += dev_primitives[i].v[2].col * baryCoordinate[2]; dev_frags[fragIndex].color = interpColor; // interpolate normal glm::vec3 interpNorm = dev_primitives[i].v[0].nor * baryCoordinate[0]; interpNorm += dev_primitives[i].v[1].nor * baryCoordinate[1]; interpNorm += dev_primitives[i].v[2].nor * baryCoordinate[2]; dev_frags[fragIndex].normal = interpNorm; } } } } } __global__ void minFragmentShading(int numFrags, FragmentIn *dev_fragsIn, FragmentOut *dev_fragsOut) { int fragIndex = 311589; // debug float peekDepth = dev_fragsIn[fragIndex].depth; // debug int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < numFrags) { //dev_fragsOut[i].color = dev_fragsIn[i].color * abs(dev_fragsIn[i].depth); glm::vec3 norm = dev_fragsIn[i].normal; dev_fragsOut[i].color[0] = dev_fragsIn[i].normal[0]; dev_fragsOut[i].color[1] = dev_fragsIn[i].normal[1]; dev_fragsOut[i].color[2] = dev_fragsIn[i].normal[2]; } } /** * Perform rasterization. */ void minRasterizeFirstTry(uchar4 *pbo, glm::mat4 sceneGraphTransform, glm::mat4 cameraMatrix) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockSize1d(sideLength2d * sideLength2d); dim3 blockCount2d_display((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); // TODO: Execute your rasterization pipeline here // (See README for rasterization pipeline outline.) // 1) clear frame buffer with some default value. 
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); hipLaunchKernelGGL(( minClearFragsIn) , dim3(blockCount2d_display), dim3(blockSize2d) , 0, 0, width, height, dev_fragsIn); minClearFragsOut << <blockCount2d_display, blockSize2d >> >(width, height, dev_fragsOut); // 2) vertex shade glm::mat4 tf = cameraMatrix * sceneGraphTransform; dim3 blockCount1d_vertices((vertCount - 1) / blockSize1d.x + 1); minVertexShader << <blockCount1d_vertices, blockSize1d >> >(vertCount, tf, dev_bufVertex, dev_shadedVertices); checkCUDAError("debug: vertex shading"); // 3) primitive assembly int numPrimitives = bufIdxSize / 3; dim3 blockCount1d_primitives((numPrimitives - 1) / blockSize1d.x + 1); hipLaunchKernelGGL(( minPrimitiveAssembly), dim3(blockCount1d_primitives), dim3(blockSize1d), 0, 0, numPrimitives, dev_shadedVertices, dev_primitives); checkCUDAError("debug: primitive assembly"); // 4) rasterization hipLaunchKernelGGL(( minScanlineRasterization), dim3(blockCount1d_primitives), dim3(blockSize1d), 0, 0, width, height, numPrimitives, dev_primitives, dev_fragsIn); checkCUDAError("debug: scanline rasterization"); // 5) fragment shading dim3 blockCount1d_fragments(width * height); hipLaunchKernelGGL(( minFragmentShading), dim3(blockCount1d_fragments), dim3(blockSize1d), 0, 0, width * height, dev_fragsIn, dev_fragsOut); checkCUDAError("debug: primitive fragment shading"); // 6) fragments to depth buffer // 7) depth buffer for storing depth testing fragments // 8) frag to frame buffer // Copy depthbuffer colors into framebuffer render << <blockCount2d_display, blockSize2d >> >(width, height, dev_fragsOut, dev_framebuffer); // Copy framebuffer into OpenGL buffer for OpenGL previewing minSendImageToPBO << <blockCount2d_display, blockSize2d >> >(pbo, width, height, dev_framebuffer); checkCUDAError("rasterize"); } /** * Called once at the end of the program to free CUDA memory. */ void minRasterizeFree() { hipFree(dev_bufIdx); dev_bufIdx = NULL; hipFree(dev_bufVertex); dev_bufVertex = NULL; hipFree(dev_shadedVertices); dev_shadedVertices = NULL; hipFree(dev_primitives); dev_primitives = NULL; hipFree(dev_fragsIn); dev_fragsIn = NULL; hipFree(dev_fragsOut); dev_fragsOut = NULL; hipFree(dev_framebuffer); dev_framebuffer = NULL; checkCUDAError("rasterizeFree"); }
e819561eedb7ae565dcb05d72228c69979135baf.cu
/** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya * @date 2012-2015 * @copyright University of Pennsylvania & STUDENT */ #include "rasterize_min.h" #include <cmath> #include <cstdio> #include <cuda.h> #include <thrust/random.h> #include <util/checkCUDAError.h> #include "rasterizeTools.h" struct VertexIn { glm::vec3 pos; glm::vec3 nor; glm::vec3 col; // TODO (optional) add other vertex attributes (e.g. texture coordinates) }; struct VertexOut { // TODO glm::vec3 pos; glm::vec3 nor; glm::vec3 col; }; struct Triangle { VertexOut v[3]; }; struct FragmentIn { glm::vec3 color; glm::vec3 normal; float depth; }; struct FragmentOut { glm::vec3 color; }; static int width = 0; static int height = 0; static int *dev_bufIdx = NULL; // indices buffer static VertexIn *dev_bufVertex = NULL; // buffer of untransformed vertices static VertexOut *dev_shadedVertices = NULL; // buffer of transformed vertices. need? static Triangle *dev_primitives = NULL; // buffer of primitives static FragmentIn *dev_fragsIn = NULL; // buffer of unshaded fragments. static FragmentOut *dev_fragsOut = NULL; // buffer of shaded fragments. basically, pixels. need? static glm::vec3 *dev_framebuffer = NULL; // color frame buffer. need? static int bufIdxSize = 0; static int vertCount = 0; /** * Kernel that clears frags in */ __global__ void minClearFragsIn(int w, int h, FragmentIn *dev_fragsIn) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { dev_fragsIn[x + (y * w)].depth = 0x7ff0000000000000;// 0xfff0000000000000; dev_fragsIn[x + (y * w)].color = glm::vec3(0.0f, 0.0f, 0.0f); dev_fragsIn[x + (y * w)].normal = glm::vec3(0.1f, 0.1f, 0.1f); } } /** * Kernel that clears frags out */ __global__ void minClearFragsOut(int w, int h, FragmentOut *dev_fragsOut) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { dev_fragsOut[x + (y * w)].color = glm::vec3(0.0f, 0.0f, 0.0f); } } /** * Kernel that writes the image to the OpenGL PBO directly. */ __global__ void minSendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } // Writes fragment colors to the framebuffer __global__ void render(int w, int h, FragmentOut *frags, glm::vec3 *framebuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); // frameBuffer code assumes (0,0) is at the bottom left in pix coords, // but this code assumes it's at the top right. int frameBufferIndex = (w - 1 - x) + (h - 1 - y) * w; if (x < w && y < h) { framebuffer[index] = frags[frameBufferIndex].color; } } /** * Called once at the beginning of the program to allocate memory. 
*/ void minRasterizeInit(int w, int h) { width = w; height = h; cudaFree(dev_fragsIn); cudaMalloc(&dev_fragsIn, width * height * sizeof(FragmentIn)); cudaMemset(dev_fragsIn, 0, width * height * sizeof(FragmentIn)); cudaFree(dev_fragsOut); cudaMalloc(&dev_fragsOut, width * height * sizeof(FragmentOut)); cudaMemset(dev_fragsOut, 0, width * height * sizeof(FragmentOut)); cudaFree(dev_framebuffer); cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3)); cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); checkCUDAError("rasterizeInit"); } /** * Set all of the buffers necessary for rasterization. */ void minRasterizeSetBuffers( int _bufIdxSize, int *bufIdx, int _vertCount, float *bufPos, float *bufNor, float *bufCol) { bufIdxSize = _bufIdxSize; vertCount = _vertCount; cudaFree(dev_bufIdx); cudaMalloc(&dev_bufIdx, bufIdxSize * sizeof(int)); cudaMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), cudaMemcpyHostToDevice); VertexIn *bufVertex = new VertexIn[_vertCount]; VertexOut *bufVertexOut = new VertexOut[_vertCount]; for (int i = 0; i < vertCount; i++) { int j = i * 3; bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]); bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]); bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]); } cudaFree(dev_bufVertex); cudaMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn)); cudaMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), cudaMemcpyHostToDevice); cudaFree(dev_shadedVertices); cudaMalloc(&dev_shadedVertices, vertCount * sizeof(VertexOut)); cudaFree(dev_primitives); cudaMalloc(&dev_primitives, bufIdxSize / 3 * sizeof(Triangle)); cudaMemset(dev_primitives, 0, bufIdxSize / 3 * sizeof(Triangle)); delete bufVertex; delete bufVertexOut; checkCUDAError("rasterizeSetBuffers"); } // minimal vertex shader __global__ void minVertexShader(int vertCount, glm::mat4 tf, VertexIn *dev_verticesIn, VertexOut *dev_verticesOut) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < vertCount) { dev_verticesOut[i].pos = tfPoint(tf, dev_verticesIn[i].pos); dev_verticesOut[i].nor = dev_verticesIn[i].nor; glm::vec3 pos = dev_verticesOut[i].pos; // debug glm::vec3 untf = dev_verticesIn[i].pos; // debug dev_verticesOut[i].col = dev_verticesIn[i].col; } } // primitive assembly. 1D linear blocks expected __global__ void minPrimitiveAssembly(int numPrimitives, VertexOut *dev_vertices, Triangle *dev_primitives) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < numPrimitives) { dev_primitives[i].v[0] = dev_vertices[i * 3]; dev_primitives[i].v[1] = dev_vertices[i * 3 + 1]; dev_primitives[i].v[2] = dev_vertices[i * 3 + 2]; } } // scanline rasterization. 1D linear blocks expected __global__ void minScanlineRasterization(int w, int h, int numPrimitives, Triangle *dev_primitives, FragmentIn *dev_frags) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < numPrimitives) { // get the AABB of the triangle glm::vec3 v[3]; v[0] = dev_primitives[i].v[0].pos; v[1] = dev_primitives[i].v[1].pos; v[2] = dev_primitives[i].v[2].pos; AABB triangleBB = getAABBForTriangle(v); // triangle should have been "smooshed" to screen coordinates already. // walk and fill frags. 
float pixWidth = 2.0f / (float) w; // NDC goes from -1 to 1 in x and y float pixHeight = 2.0f / (float) h; int BBYmin = triangleBB.min.y * (h / 2); int BBYmax = triangleBB.max.y * (h / 2); int BBXmin = triangleBB.min.x * (w / 2); int BBXmax = triangleBB.max.x * (w / 2); // fake clip if (BBYmin < -h / 2) BBYmin = -h / 2; if (BBXmin < -w / 2) BBXmin = -w / 2; if (BBYmax > h / 2) BBYmax = h / 2; if (BBXmax > w / 2) BBXmax = w / 2; float normX = dev_primitives[i].v[0].nor.x; // debug float normY = dev_primitives[i].v[0].nor.y; // debug float normZ = dev_primitives[i].v[0].nor.z; // debug // //float depth0 = v[0].z; //float depth1 = v[1].z; //float depth2 = v[2].z; //float depthz = v[0].x; // scan over the AABB for (int y = BBYmin; y < BBYmax; y++) { for (int x = BBXmin; x < BBXmax; x++) { // compute x y coordinates of the center of "this fragment" //printf("%i %i\n", x, y); glm::vec2 fragCoord = glm::vec2(x * pixWidth + pixWidth * 0.5f, y * pixHeight + pixHeight * 0.5f); // check if it's in dev_primitives[i].v using bary glm::vec3 baryCoordinate = calculateBarycentricCoordinate(v, fragCoord); if (!isBarycentricCoordInBounds(baryCoordinate)) { continue; } // check depth using bary. the version in utils returns a negative z for some reason. float zDepth = -getZAtCoordinate(baryCoordinate, v); if (x == -10 && y == -10 && dev_primitives[i].v[0].nor.y < 0.0f) { // debug printf("zDepth of bottom face is %f\n", zDepth); } // we're pretending NDC is -1 to +1 along each axis // so a fragIndx(0,0) is at NDC -1 -1 // btw, going from NDC back to pixel coordinates: // I've flipped the drawing system, so now it assumes 0,0 is in the bottom left. int fragIndex = (x + (w / 2) - 1) + ((y + (h / 2) - 1) * w); // if all things pass ok, then insert into fragment. float peekDepth = dev_frags[fragIndex].depth; // debug bool closer = zDepth < peekDepth; // debug if (zDepth <= dev_frags[fragIndex].depth) { dev_frags[fragIndex].depth = zDepth; // interpolate color glm::vec3 interpColor = dev_primitives[i].v[0].col * baryCoordinate[0]; interpColor += dev_primitives[i].v[1].col * baryCoordinate[1]; interpColor += dev_primitives[i].v[2].col * baryCoordinate[2]; dev_frags[fragIndex].color = interpColor; // interpolate normal glm::vec3 interpNorm = dev_primitives[i].v[0].nor * baryCoordinate[0]; interpNorm += dev_primitives[i].v[1].nor * baryCoordinate[1]; interpNorm += dev_primitives[i].v[2].nor * baryCoordinate[2]; dev_frags[fragIndex].normal = interpNorm; } } } } } __global__ void minFragmentShading(int numFrags, FragmentIn *dev_fragsIn, FragmentOut *dev_fragsOut) { int fragIndex = 311589; // debug float peekDepth = dev_fragsIn[fragIndex].depth; // debug int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < numFrags) { //dev_fragsOut[i].color = dev_fragsIn[i].color * abs(dev_fragsIn[i].depth); glm::vec3 norm = dev_fragsIn[i].normal; dev_fragsOut[i].color[0] = dev_fragsIn[i].normal[0]; dev_fragsOut[i].color[1] = dev_fragsIn[i].normal[1]; dev_fragsOut[i].color[2] = dev_fragsIn[i].normal[2]; } } /** * Perform rasterization. */ void minRasterizeFirstTry(uchar4 *pbo, glm::mat4 sceneGraphTransform, glm::mat4 cameraMatrix) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockSize1d(sideLength2d * sideLength2d); dim3 blockCount2d_display((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); // TODO: Execute your rasterization pipeline here // (See README for rasterization pipeline outline.) // 1) clear frame buffer with some default value. 
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); minClearFragsIn <<<blockCount2d_display, blockSize2d >>>(width, height, dev_fragsIn); minClearFragsOut << <blockCount2d_display, blockSize2d >> >(width, height, dev_fragsOut); // 2) vertex shade glm::mat4 tf = cameraMatrix * sceneGraphTransform; dim3 blockCount1d_vertices((vertCount - 1) / blockSize1d.x + 1); minVertexShader << <blockCount1d_vertices, blockSize1d >> >(vertCount, tf, dev_bufVertex, dev_shadedVertices); checkCUDAError("debug: vertex shading"); // 3) primitive assembly int numPrimitives = bufIdxSize / 3; dim3 blockCount1d_primitives((numPrimitives - 1) / blockSize1d.x + 1); minPrimitiveAssembly<<<blockCount1d_primitives, blockSize1d>>>(numPrimitives, dev_shadedVertices, dev_primitives); checkCUDAError("debug: primitive assembly"); // 4) rasterization minScanlineRasterization<<<blockCount1d_primitives, blockSize1d>>>(width, height, numPrimitives, dev_primitives, dev_fragsIn); checkCUDAError("debug: scanline rasterization"); // 5) fragment shading dim3 blockCount1d_fragments(width * height); minFragmentShading<<<blockCount1d_fragments, blockSize1d>>>(width * height, dev_fragsIn, dev_fragsOut); checkCUDAError("debug: primitive fragment shading"); // 6) fragments to depth buffer // 7) depth buffer for storing depth testing fragments // 8) frag to frame buffer // Copy depthbuffer colors into framebuffer render << <blockCount2d_display, blockSize2d >> >(width, height, dev_fragsOut, dev_framebuffer); // Copy framebuffer into OpenGL buffer for OpenGL previewing minSendImageToPBO << <blockCount2d_display, blockSize2d >> >(pbo, width, height, dev_framebuffer); checkCUDAError("rasterize"); } /** * Called once at the end of the program to free CUDA memory. */ void minRasterizeFree() { cudaFree(dev_bufIdx); dev_bufIdx = NULL; cudaFree(dev_bufVertex); dev_bufVertex = NULL; cudaFree(dev_shadedVertices); dev_shadedVertices = NULL; cudaFree(dev_primitives); dev_primitives = NULL; cudaFree(dev_fragsIn); dev_fragsIn = NULL; cudaFree(dev_fragsOut); dev_fragsOut = NULL; cudaFree(dev_framebuffer); dev_framebuffer = NULL; checkCUDAError("rasterizeFree"); }
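minScanlineRasterization above uses the barycentric coordinate twice: as the inside-triangle test and as the interpolation weights for depth, color, and normal. A minimal sketch of the interpolation step itself (scalar attribute, helper name hypothetical):

// Blend a per-vertex scalar attribute with barycentric weights (b0, b1, b2),
// which sum to 1 for points inside the triangle.
__host__ __device__ inline float bary_interp(float a0, float a1, float a2,
                                             float b0, float b1, float b2) {
  return b0 * a0 + b1 * a1 + b2 * a2;
}
// The kernel applies the same weights component-wise to the glm::vec3 color and
// normal of the three triangle vertices once the depth test passes.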
ed4c5f16e237a8b1cec43ad96c7dcfa9f2a149ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S1_4.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real 
sv_sst[]={-86.5808390037434,0.00128660871673998,0.780017411965445,0.779866089420134,0.000174427830947983,0.485221044706665,0.00293766951726531,0.999998352403933,1.92945075222659e-08,1.88789743418140e-05,0.999774028269383,1.00656274895341,0.999980305363904,5.75119942688369e-05,0.652562498130868,9.24127402937561,140.252453661949}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters 
real parameters []={14.1821693920716,0.000262369857178200,0.000171567529876738,0.000414005106483591,0.297500226048348,0.162622717394298,0.207515183338143,3.39980849488085,0.0224798791846427,2.56467648820225,1096.76282222310,0.000572145335603343,0.124382279366777,0.0197003709329121,0.00191117528600119,6.10868623397025e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); 
CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + 
dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
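The state vector in the model above is a pitched 2D array: hipMallocPitch allocates NEQ rows (one per state variable) of num_volumes reals, and the expression *((real*)((char*)sv + pitch * i) + threadID) selects row i for one cell because the pitch is a byte stride. A minimal CUDA sketch of the same access pattern; names and sizes are illustrative, and the file above uses the hip* equivalents of these calls:

#include <cuda_runtime.h>

#define NEQ 17              // number of state variables per cell, as in the model above
typedef float real;         // stand-in for the model's 'real' type

__constant__ size_t pitch;  // byte stride between rows, set from the host

__global__ void init_states(real *sv, int num_cells) {
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id >= num_cells) return;
    for (int i = 0; i < NEQ; i++) {
        // Row i starts at byte offset pitch * i; column id selects the cell.
        *((real *)((char *)sv + pitch * i) + id) = 0.0f;
    }
}

int main() {
    const int num_cells = 4096;
    real *d_sv;
    size_t pitch_h;
    // One row per state variable, num_cells columns per row (width is in bytes).
    cudaMallocPitch((void **)&d_sv, &pitch_h, num_cells * sizeof(real), NEQ);
    cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t));

    const int block = 256;
    init_states<<<(num_cells + block - 1) / block, block>>>(d_sv, num_cells);
    cudaDeviceSynchronize();
    cudaFree(d_sv);
    return 0;
}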
ed4c5f16e237a8b1cec43ad96c7dcfa9f2a149ec.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S1_4.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5808390037434,0.00128660871673998,0.780017411965445,0.779866089420134,0.000174427830947983,0.485221044706665,0.00293766951726531,0.999998352403933,1.92945075222659e-08,1.88789743418140e-05,0.999774028269383,1.00656274895341,0.999980305363904,5.75119942688369e-05,0.652562498130868,9.24127402937561,140.252453661949}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue 
matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.1821693920716,0.000262369857178200,0.000171567529876738,0.000414005106483591,0.297500226048348,0.162622717394298,0.207515183338143,3.39980849488085,0.0224798791846427,2.56467648820225,1096.76282222310,0.000572145335603343,0.124382279366777,0.0197003709329121,0.00191117528600119,6.10868623397025e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; 
GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; 
Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
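Comparing the .hip and .cu versions of this model shows the main rewrite hipify applies to kernel launches: the triple-chevron syntax becomes a hipLaunchKernelGGL call with explicit grid, block, shared-memory, and stream arguments. A minimal illustration of that correspondence; the kernel is a placeholder, not one of the model kernels:

#include <cuda_runtime.h>

__global__ void dummy_kernel(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}

int main() {
    const int n = 1024, block = 256, grid = (n + block - 1) / block;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));

    // CUDA form, as in the .cu file above (shared memory = 0, default stream):
    dummy_kernel<<<grid, block, 0, 0>>>(d_x, n);

    // After hipify, the .hip file expresses the same launch as:
    //   hipLaunchKernelGGL(dummy_kernel, dim3(grid), dim3(block), 0, 0, d_x, n);
    // where the third and fourth arguments are the dynamic shared-memory size
    // and the stream, mirroring the two optional <<<...>>> parameters.

    cudaDeviceSynchronize();
    cudaFree(d_x);
    return 0;
}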
ebddf312902295899b7cb3da1bdb3043a3f2a281.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include "nn/cuda/mapped_im2col.cuh" namespace mapped_conv { namespace nn { namespace cuda { torch::Tensor MappedConvForward(torch::Tensor input, torch::Tensor sample_map, torch::Tensor weight, torch::Tensor bias, int64_t kernel_size, int64_t interpolation) { // Useful dimensions to have const int64_t nOutputPlanes = weight.size(0); const int64_t nInputPlanes = weight.size(1); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t outputHeight = sample_map.size(0); const int64_t outputWidth = sample_map.size(1); const int64_t batchSize = input.size(0); // Initialize output and temporary columns torch::Tensor output = torch::zeros({batchSize, nOutputPlanes, outputHeight, outputWidth}, input.options()) + 10; torch::Tensor columns = torch::zeros({kernel_size * nInputPlanes, outputHeight * outputWidth}, input.options()); // For each elt in batch, do: const int64_t outputBatchStride = nOutputPlanes * outputHeight * outputWidth; for (int64_t b = 0; b < batchSize; b++) { // CUDA mapped_im2col MappedIm2Col2DLauncher(input[b], sample_map, nInputPlanes, inputHeight, inputWidth, outputWidth, columns.size(1), kernel_size, interpolation, columns); // Get cuda stream hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t k = weight.size(1) * weight.size(2); const int64_t n = weight.size(0); if (input.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), m, weight.data<double>(), k, &beta, output.data<double>() + b * outputBatchStride, m); } else if (input.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), m, weight.data<float>(), k, &beta, output.data<float>() + b * outputBatchStride, m); } CUDA_CHECK(hipGetLastError()) // Use PyTorch to add the bias output[b] += bias.view({output[b].size(0), 1, 1}); } return output; } torch::Tensor MappedConvBackwardInput(torch::Tensor grad_output, torch::Tensor sample_map, torch::Tensor weight, int64_t inputHeight, int64_t inputWidth, int64_t kernel_size, int64_t interpolation) { // Useful dimensions to have const int64_t nOutputPlanes = weight.size(0); const int64_t nInputPlanes = weight.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t batchSize = grad_output.size(0); // Initialize output and temporary columns torch::Tensor input_grad = torch::zeros({batchSize, nInputPlanes, inputHeight, inputWidth}, grad_output.options()); torch::Tensor columns = torch::zeros({kernel_size * nInputPlanes, outputHeight * outputWidth}, grad_output.options()); // For each elt in batch, do: const int64_t outputBatchStride = nOutputPlanes * outputHeight * outputWidth; for (int64_t b = 0; b < batchSize; b++) { // Get cuda stream hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); // Call the GEMM function (note that it expects column major matrices) 
const int64_t m = columns.size(1); const int64_t n = weight.size(1) * weight.size(2); const int64_t k = weight.size(0); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha, grad_output.data<double>() + b * outputBatchStride, m, // lda=N weight.data<double>(), n, // ldb=ck^2 &beta, columns.data<double>(), m); // ldc=N } else if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha, grad_output.data<float>() + b * outputBatchStride, m, // lda=N weight.data<float>(), n, // ldb=ck^2 &beta, columns.data<float>(), m); // ldc=N } CUDA_CHECK(hipGetLastError()) MappedCol2Im2DLauncher(columns, sample_map, nInputPlanes, inputHeight, inputWidth, outputWidth, columns.size(1), kernel_size, interpolation, input_grad[b]); } return input_grad; } torch::Tensor MappedConvBackwardWeight(torch::Tensor grad_output, torch::Tensor sample_map, torch::Tensor input, int64_t kernel_size, int64_t interpolation) { // Useful dimensions to have const int64_t nOutputPlanes = grad_output.size(1); const int64_t nInputPlanes = input.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t batchSize = grad_output.size(0); // Initialize output and temporary columns torch::Tensor weight_grad = torch::zeros( {nOutputPlanes, nInputPlanes, kernel_size}, grad_output.options()); torch::Tensor columns = torch::zeros({kernel_size * nInputPlanes, outputHeight * outputWidth}, grad_output.options()); // For each elt in batch, do: const int64_t outputBatchStride = nOutputPlanes * outputHeight * outputWidth; for (int64_t b = 0; b < batchSize; b++) { // Create the column matrix from the input as we would in // mapped_conv_forward MappedIm2Col2DLauncher(input[b], sample_map, nInputPlanes, inputHeight, inputWidth, outputWidth, columns.size(1), kernel_size, interpolation, columns); // Get cuda stream hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); // Propagate the gradients from the outputs to the weights using GEMM // Note that GEMM expects column major matrices const int64_t m = weight_grad.size(1) * weight_grad.size(2); const int64_t n = weight_grad.size(0); const int64_t k = columns.size(1); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 1.0; hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), k, grad_output.data<double>() + b * outputBatchStride, k, &beta, weight_grad.data<double>(), m); } if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 1.0; hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), k, grad_output.data<float>() + b * outputBatchStride, k, &beta, weight_grad.data<float>(), m); } CUDA_CHECK(hipGetLastError()) } return weight_grad; } } // namespace cuda } // namespace nn } // namespace mapped_conv
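The GEMM calls above lean on the note in the comments that cuBLAS/hipBLAS expects column-major matrices: a row-major matrix handed to the library is read as its transpose, so the row-major product output = weight x columns is computed as the column-major product output^T = columns^T x weight^T with no explicit transposes. A small self-contained cuBLAS sketch of that convention, assuming float data (the file above dispatches on dtype) and hypothetical toy matrices:

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

// Computes the row-major product C(MxN) = A(MxK) * B(KxN) with column-major
// cuBLAS by evaluating C^T = B^T * A^T, the same trick used above.
int main() {
    const int M = 2, K = 3, N = 4;
    std::vector<float> A = {1, 2, 3,
                            4, 5, 6};          // 2x3, row-major
    std::vector<float> B(K * N, 1.0f);         // 3x4, all ones
    std::vector<float> C(M * N, 0.0f);

    float *dA, *dB, *dC;
    cudaMalloc(&dA, A.size() * sizeof(float));
    cudaMalloc(&dB, B.size() * sizeof(float));
    cudaMalloc(&dC, C.size() * sizeof(float));
    cudaMemcpy(dA, A.data(), A.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B.data(), B.size() * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // Column-major view: C^T (NxM) = B^T (NxK) * A^T (KxM)
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K,
                &alpha, dB, N, dA, K,
                &beta, dC, N);
    cudaMemcpy(C.data(), dC, C.size() * sizeof(float), cudaMemcpyDeviceToHost);
    printf("C[0][0] = %f (expect 6)\n", C[0]);  // 1 + 2 + 3

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}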
ebddf312902295899b7cb3da1bdb3043a3f2a281.cu
#include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include <cublas_v2.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include "nn/cuda/mapped_im2col.cuh" namespace mapped_conv { namespace nn { namespace cuda { torch::Tensor MappedConvForward(torch::Tensor input, torch::Tensor sample_map, torch::Tensor weight, torch::Tensor bias, int64_t kernel_size, int64_t interpolation) { // Useful dimensions to have const int64_t nOutputPlanes = weight.size(0); const int64_t nInputPlanes = weight.size(1); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t outputHeight = sample_map.size(0); const int64_t outputWidth = sample_map.size(1); const int64_t batchSize = input.size(0); // Initialize output and temporary columns torch::Tensor output = torch::zeros({batchSize, nOutputPlanes, outputHeight, outputWidth}, input.options()) + 10; torch::Tensor columns = torch::zeros({kernel_size * nInputPlanes, outputHeight * outputWidth}, input.options()); // For each elt in batch, do: const int64_t outputBatchStride = nOutputPlanes * outputHeight * outputWidth; for (int64_t b = 0; b < batchSize; b++) { // CUDA mapped_im2col MappedIm2Col2DLauncher(input[b], sample_map, nInputPlanes, inputHeight, inputWidth, outputWidth, columns.size(1), kernel_size, interpolation, columns); // Get cuda stream cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, at::cuda::getCurrentCUDAStream()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t k = weight.size(1) * weight.size(2); const int64_t n = weight.size(0); if (input.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), m, weight.data<double>(), k, &beta, output.data<double>() + b * outputBatchStride, m); } else if (input.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), m, weight.data<float>(), k, &beta, output.data<float>() + b * outputBatchStride, m); } CUDA_CHECK(cudaGetLastError()) // Use PyTorch to add the bias output[b] += bias.view({output[b].size(0), 1, 1}); } return output; } torch::Tensor MappedConvBackwardInput(torch::Tensor grad_output, torch::Tensor sample_map, torch::Tensor weight, int64_t inputHeight, int64_t inputWidth, int64_t kernel_size, int64_t interpolation) { // Useful dimensions to have const int64_t nOutputPlanes = weight.size(0); const int64_t nInputPlanes = weight.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t batchSize = grad_output.size(0); // Initialize output and temporary columns torch::Tensor input_grad = torch::zeros({batchSize, nInputPlanes, inputHeight, inputWidth}, grad_output.options()); torch::Tensor columns = torch::zeros({kernel_size * nInputPlanes, outputHeight * outputWidth}, grad_output.options()); // For each elt in batch, do: const int64_t outputBatchStride = nOutputPlanes * outputHeight * outputWidth; for (int64_t b = 0; b < batchSize; b++) { // Get cuda stream cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, at::cuda::getCurrentCUDAStream()); // Call the GEMM function (note that it expects column major matrices) const int64_t m = columns.size(1); const int64_t n = weight.size(1) * weight.size(2); const int64_t k = 
weight.size(0); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 0.0; cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, grad_output.data<double>() + b * outputBatchStride, m, // lda=N weight.data<double>(), n, // ldb=ck^2 &beta, columns.data<double>(), m); // ldc=N } else if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 0.0; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, grad_output.data<float>() + b * outputBatchStride, m, // lda=N weight.data<float>(), n, // ldb=ck^2 &beta, columns.data<float>(), m); // ldc=N } CUDA_CHECK(cudaGetLastError()) MappedCol2Im2DLauncher(columns, sample_map, nInputPlanes, inputHeight, inputWidth, outputWidth, columns.size(1), kernel_size, interpolation, input_grad[b]); } return input_grad; } torch::Tensor MappedConvBackwardWeight(torch::Tensor grad_output, torch::Tensor sample_map, torch::Tensor input, int64_t kernel_size, int64_t interpolation) { // Useful dimensions to have const int64_t nOutputPlanes = grad_output.size(1); const int64_t nInputPlanes = input.size(1); const int64_t outputHeight = grad_output.size(2); const int64_t outputWidth = grad_output.size(3); const int64_t inputHeight = input.size(2); const int64_t inputWidth = input.size(3); const int64_t batchSize = grad_output.size(0); // Initialize output and temporary columns torch::Tensor weight_grad = torch::zeros( {nOutputPlanes, nInputPlanes, kernel_size}, grad_output.options()); torch::Tensor columns = torch::zeros({kernel_size * nInputPlanes, outputHeight * outputWidth}, grad_output.options()); // For each elt in batch, do: const int64_t outputBatchStride = nOutputPlanes * outputHeight * outputWidth; for (int64_t b = 0; b < batchSize; b++) { // Create the column matrix from the input as we would in // mapped_conv_forward MappedIm2Col2DLauncher(input[b], sample_map, nInputPlanes, inputHeight, inputWidth, outputWidth, columns.size(1), kernel_size, interpolation, columns); // Get cuda stream cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasSetStream(handle, at::cuda::getCurrentCUDAStream()); // Propagate the gradients from the outputs to the weights using GEMM // Note that GEMM expects column major matrices const int64_t m = weight_grad.size(1) * weight_grad.size(2); const int64_t n = weight_grad.size(0); const int64_t k = columns.size(1); if (grad_output.dtype() == torch::kDouble) { const double alpha = 1.0; const double beta = 1.0; cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, columns.data<double>(), k, grad_output.data<double>() + b * outputBatchStride, k, &beta, weight_grad.data<double>(), m); } if (grad_output.dtype() == torch::kFloat) { const float alpha = 1.0; const float beta = 1.0; cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, columns.data<float>(), k, grad_output.data<float>() + b * outputBatchStride, k, &beta, weight_grad.data<float>(), m); } CUDA_CHECK(cudaGetLastError()) } return weight_grad; } } // namespace cuda } // namespace nn } // namespace mapped_conv
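One detail worth noting in the weight-gradient path above is that the GEMM is called with beta = 1.0, so each iteration of the batch loop adds its per-sample contribution into weight_grad instead of overwriting it. A minimal sketch of that accumulation idiom, with tiny illustrative matrices rather than the real im2col buffers:

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>

// With beta = 1, each GEMM adds its result into C, so looping over a batch
// sums the per-sample contributions in place.
int main() {
    const int n = 2;                           // 2x2 matrices for clarity
    float hA[4] = {1, 0, 0, 1};                // identity
    float hB[4] = {1, 2, 3, 4};
    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(hA));
    cudaMalloc(&dB, sizeof(hB));
    cudaMalloc(&dC, sizeof(hB));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
    cudaMemset(dC, 0, sizeof(hB));             // start from zero, like weight_grad

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 1.0f;     // beta = 1 -> accumulate into C
    for (int b = 0; b < 3; ++b) {              // stand-in for the batch loop
        cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
                    &alpha, dA, n, dB, n, &beta, dC, n);
    }
    float hC[4];
    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("C[0] = %f (expect 3)\n", hC[0]);   // 3 accumulations of I * B

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}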
c29ed9469ee63a58b58b530800f2a1a8088e7fed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // R-FCN // Written by Yi Li, 2016. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_context_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIContextPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_union_rois, const Dtype* bottom_obj_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_union_rois += n * 5; int roi_batch_ind = bottom_union_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_union_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_union_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_union_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_union_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; int roi_obj_start_w = floor(static_cast<Dtype>(round(bottom_obj_rois[1])) * spatial_scale); int roi_obj_start_h = floor(static_cast<Dtype>(round(bottom_obj_rois[2])) * spatial_scale); int roi_obj_end_w = ceil(static_cast<Dtype>(round(bottom_obj_rois[3]) + 1.) * spatial_scale); int roi_obj_end_h = ceil(static_cast<Dtype>(round(bottom_obj_rois[4]) + 1.) 
* spatial_scale); roi_obj_start_w = min(max(roi_obj_start_w,0),width); roi_obj_start_h = min(max(roi_obj_start_h,0),height); roi_obj_end_w = min(max(roi_obj_end_w,0),width); roi_obj_end_h = min(max(roi_obj_end_h,0),height); // if (RectA.X1 < RectB.X2 && RectA.X2 > RectB.X1 && // RectA.Y1 < RectB.Y2 && RectA.Y2 > RectB.Y1) if(!(wstart<roi_obj_end_w && wend>roi_obj_start_w && hstart < roi_obj_end_h && hend > roi_obj_start_h)) { //printf("union = %d %d %d %d obj = %d %d %d %d\n", wstart , hstart , wend , hend //,roi_obj_start_w , roi_obj_start_h,roi_obj_end_w, roi_obj_end_h); top_data[index] = 0.0; //mapping_channel[index]=c; //is_empty = true; continue; } bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. : out_sum/bin_area; mapping_channel[index] = c; } } template <typename Dtype> void PSROIContextPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_union_rois = bottom[1]->gpu_data(); const Dtype* bottom_obj_rois = bottom[2]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIContextPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_union_rois,bottom_obj_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIContextPoolingBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_union_rois, const Dtype* bottom_obj_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_union_rois += n * 5; int roi_batch_ind = bottom_union_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_union_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_union_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_union_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_union_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; int roi_obj_start_w = floor(static_cast<Dtype>(round(bottom_obj_rois[1])) * spatial_scale); int roi_obj_start_h = floor(static_cast<Dtype>(round(bottom_obj_rois[2])) * spatial_scale); int roi_obj_end_w = ceil(static_cast<Dtype>(round(bottom_obj_rois[3]) + 1.) * spatial_scale); int roi_obj_end_h = ceil(static_cast<Dtype>(round(bottom_obj_rois[4]) + 1.) * spatial_scale); roi_obj_start_w = min(max(roi_obj_start_w,0),width); roi_obj_start_h = min(max(roi_obj_start_h,0),height); roi_obj_end_w = min(max(roi_obj_end_w,0),width); roi_obj_end_h = min(max(roi_obj_end_h,0),height); // if (RectA.X1 < RectB.X2 && RectA.X2 > RectB.X1 && // RectA.Y1 < RectB.Y2 && RectA.Y2 > RectB.Y1) if(!(wstart<roi_obj_end_w && wend>roi_obj_start_w && hstart < roi_obj_end_h && hend > roi_obj_start_h)) { //printf("union = %d %d %d %d obj = %d %d %d %d\n", wstart , hstart , wend , hend //,roi_obj_start_w , roi_obj_start_h,roi_obj_end_w, roi_obj_end_h); //top_data[index] = 0.0; //mapping_channel[index]=c; //is_empty = true; continue; } Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } template <typename Dtype> void PSROIContextPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_union_rois = bottom[1]->gpu_data(); const Dtype* bottom_obj_rois = bottom[2]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom[2]->count(), Dtype(0), bottom[2]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIContextPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_diff, bottom_union_rois, bottom_obj_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIContextPoolingLayer); } // namespace caffe
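The backward kernel above scatters diff_val into bottom_diff with caffe_gpu_atomic_add because different ROIs, and neighbouring bins after the floor/ceil expansion, can cover the same input pixel, so plain stores would race. A minimal sketch of why the atomic is needed, using the raw CUDA atomicAdd that the Caffe wrapper builds on; the indexing is illustrative only:

#include <cuda_runtime.h>
#include <cstdio>

// Many threads accumulate into a small output buffer; without atomicAdd the
// concurrent read-modify-write sequences would lose updates.
__global__ void scatter_add(const float *grad, int n, float *out, int out_size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        int target = i % out_size;        // many threads map to the same slot
        atomicAdd(&out[target], grad[i]);
    }
}

int main() {
    const int n = 1 << 16, out_size = 8;
    float *d_grad, *d_out;
    cudaMalloc(&d_grad, n * sizeof(float));
    cudaMalloc(&d_out, out_size * sizeof(float));
    cudaMemset(d_out, 0, out_size * sizeof(float));

    float *h_grad = new float[n];
    for (int i = 0; i < n; ++i) h_grad[i] = 1.0f;   // predictable sum
    cudaMemcpy(d_grad, h_grad, n * sizeof(float), cudaMemcpyHostToDevice);

    scatter_add<<<(n + 255) / 256, 256>>>(d_grad, n, d_out, out_size);

    float h_out[out_size];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("out[0] = %f (expect %d)\n", h_out[0], n / out_size);

    delete[] h_grad;
    cudaFree(d_grad); cudaFree(d_out);
    return 0;
}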
c29ed9469ee63a58b58b530800f2a1a8088e7fed.cu
// -------------------------------------------------------- // R-FCN // Written by Yi Li, 2016. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_context_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIContextPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_union_rois, const Dtype* bottom_obj_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_union_rois += n * 5; int roi_batch_ind = bottom_union_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_union_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_union_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_union_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_union_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; int roi_obj_start_w = floor(static_cast<Dtype>(round(bottom_obj_rois[1])) * spatial_scale); int roi_obj_start_h = floor(static_cast<Dtype>(round(bottom_obj_rois[2])) * spatial_scale); int roi_obj_end_w = ceil(static_cast<Dtype>(round(bottom_obj_rois[3]) + 1.) * spatial_scale); int roi_obj_end_h = ceil(static_cast<Dtype>(round(bottom_obj_rois[4]) + 1.) 
* spatial_scale); roi_obj_start_w = min(max(roi_obj_start_w,0),width); roi_obj_start_h = min(max(roi_obj_start_h,0),height); roi_obj_end_w = min(max(roi_obj_end_w,0),width); roi_obj_end_h = min(max(roi_obj_end_h,0),height); // if (RectA.X1 < RectB.X2 && RectA.X2 > RectB.X1 && // RectA.Y1 < RectB.Y2 && RectA.Y2 > RectB.Y1) if(!(wstart<roi_obj_end_w && wend>roi_obj_start_w && hstart < roi_obj_end_h && hend > roi_obj_start_h)) { //printf("union = %d %d %d %d obj = %d %d %d %d\n", wstart , hstart , wend , hend //,roi_obj_start_w , roi_obj_start_h,roi_obj_end_w, roi_obj_end_h); top_data[index] = 0.0; //mapping_channel[index]=c; //is_empty = true; continue; } bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. : out_sum/bin_area; mapping_channel[index] = c; } } template <typename Dtype> void PSROIContextPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_union_rois = bottom[1]->gpu_data(); const Dtype* bottom_obj_rois = bottom[2]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIContextPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_union_rois,bottom_obj_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIContextPoolingBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_union_rois, const Dtype* bottom_obj_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_union_rois += n * 5; int roi_batch_ind = bottom_union_rois[0]; Dtype roi_start_w = static_cast<Dtype>(round(bottom_union_rois[1])) * spatial_scale; Dtype roi_start_h = static_cast<Dtype>(round(bottom_union_rois[2])) * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(round(bottom_union_rois[3]) + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(round(bottom_union_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; int roi_obj_start_w = floor(static_cast<Dtype>(round(bottom_obj_rois[1])) * spatial_scale); int roi_obj_start_h = floor(static_cast<Dtype>(round(bottom_obj_rois[2])) * spatial_scale); int roi_obj_end_w = ceil(static_cast<Dtype>(round(bottom_obj_rois[3]) + 1.) * spatial_scale); int roi_obj_end_h = ceil(static_cast<Dtype>(round(bottom_obj_rois[4]) + 1.) * spatial_scale); roi_obj_start_w = min(max(roi_obj_start_w,0),width); roi_obj_start_h = min(max(roi_obj_start_h,0),height); roi_obj_end_w = min(max(roi_obj_end_w,0),width); roi_obj_end_h = min(max(roi_obj_end_h,0),height); // if (RectA.X1 < RectB.X2 && RectA.X2 > RectB.X1 && // RectA.Y1 < RectB.Y2 && RectA.Y2 > RectB.Y1) if(!(wstart<roi_obj_end_w && wend>roi_obj_start_w && hstart < roi_obj_end_h && hend > roi_obj_start_h)) { //printf("union = %d %d %d %d obj = %d %d %d %d\n", wstart , hstart , wend , hend //,roi_obj_start_w , roi_obj_start_h,roi_obj_end_w, roi_obj_end_h); //top_data[index] = 0.0; //mapping_channel[index]=c; //is_empty = true; continue; } Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } template <typename Dtype> void PSROIContextPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_union_rois = bottom[1]->gpu_data(); const Dtype* bottom_obj_rois = bottom[2]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom[2]->count(), Dtype(0), bottom[2]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIContextPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_diff, bottom_union_rois, bottom_obj_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIContextPoolingLayer); } // namespace caffe
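The bin geometry in both kernels above follows one recipe: scale the ROI into feature-map coordinates, divide its height and width by the pooled grid size, take floor for a bin's start and ceil for its end, clamp to the feature map, and mark the bin empty if it collapses. A small host-side restatement of that arithmetic for a single bin; the values are illustrative and the names simply mirror the kernel variables:

#include <cmath>
#include <algorithm>
#include <cstdio>

// Computes the input range [hstart, hend) x [wstart, wend) covered by pooled
// bin (ph, pw) of an ROI, mirroring the kernel arithmetic above.
void psroi_bin(float roi_start_h, float roi_start_w,
               float roi_height, float roi_width,
               int pooled_height, int pooled_width,
               int height, int width, int ph, int pw) {
    float bin_size_h = roi_height / pooled_height;
    float bin_size_w = roi_width / pooled_width;

    int hstart = (int)std::floor(ph * bin_size_h + roi_start_h);
    int wstart = (int)std::floor(pw * bin_size_w + roi_start_w);
    int hend   = (int)std::ceil((ph + 1) * bin_size_h + roi_start_h);
    int wend   = (int)std::ceil((pw + 1) * bin_size_w + roi_start_w);

    // Clamp to the feature map, exactly as in the kernels.
    hstart = std::min(std::max(hstart, 0), height);
    hend   = std::min(std::max(hend, 0), height);
    wstart = std::min(std::max(wstart, 0), width);
    wend   = std::min(std::max(wend, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);

    printf("bin (%d,%d): h [%d,%d) w [%d,%d) empty=%d\n",
           ph, pw, hstart, hend, wstart, wend, (int)is_empty);
}

int main() {
    // A 14.5 x 9.3 ROI starting at (3.2, 5.1) on a 50x60 map, pooled to 7x7.
    psroi_bin(3.2f, 5.1f, 14.5f, 9.3f, 7, 7, 50, 60, 0, 0);
    psroi_bin(3.2f, 5.1f, 14.5f, 9.3f, 7, 7, 50, 60, 6, 6);
    return 0;
}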
8e986ded7223d54e8176c24922181b9e6c074109.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/adam.h" #include "orttraining/training_ops/cuda/optimizer/common.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM> __global__ void _AdamOptimizer_mode0( const T1* eta, const T3* weights, const T_GRAD* grads, const T4* moment_1, const T4* moment_2, const T3* loss_scale, const T_GRAD_NORM* grad_norm, const T4 alpha, const T4 beta, const T4 lambda, const T4 epsilon, const T4 alpha_correction, const T4 beta_correction, T4* moment_1_out, T4* moment_2_out, T3* weights_out, T_GRAD* grads_out, half* fp16_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm); // Gradient scaling/clipping. const T4 g = T4(grads[id]) / actual_scale; // A shared constant. const T4 one = T4(1.0f); // Compute exponentially-averaged historical gradient. const T4 m1o = alpha * moment_1[id] + (one - alpha) * g; const T4 m1o_corrected = m1o / alpha_correction; // Compute exponentially-averaged historical squared gradient. const T4 m2o = beta * moment_2[id] + (one - beta) * g * g; const T4 m2o_corrected = m2o / beta_correction; // Compute weight update. const T4 denom = _Sqrt(m2o_corrected) + epsilon; const T4 update = (m1o_corrected / denom) + (lambda * T4(weights[id])); const T4 delta = -T4(*eta) * update; // Compute the new gradient. if (grads_out) { grads_out[id] = T_GRAD(delta); } // Compute the new weight. if (weights_out) { weights_out[id] = weights[id] + T3(delta); if (fp16_weights_out) { fp16_weights_out[id] = static_cast<half>(weights_out[id]); } } moment_1_out[id] = m1o; moment_2_out[id] = m2o; } template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM> __global__ void _AdamOptimizer_mode1( const T1* eta, const T3* weights, const T_GRAD* grads, const T4* moment_1, const T4* moment_2, const T3* loss_scale, const T_GRAD_NORM* grad_norm, const T4 alpha, const T4 beta, const T4 lambda, const T4 epsilon, const T4 alpha_correction, const T4 beta_correction, T4* moment_1_out, T4* moment_2_out, T3* weights_out, T_GRAD* grads_out, half* fp16_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm); // Gradient scaling/clipping. const T4 g = T4(grads[id]) / actual_scale; // A shared constant. const T4 one = T4(1.0f); // Compute exponentially-averaged historical gradient. const T4 m1o = alpha * moment_1[id] + (one - alpha) * g; // Compute exponentially-averaged historical squared gradient. 
const T4 m2o = beta * moment_2[id] + (one - beta) * g * g; const T4 denom = _Sqrt(m2o) + epsilon; // Apply bias correction terms on learning rate const T4 step_size = T4(*eta) * _Sqrt(beta_correction) / alpha_correction; // Huggingface updates weights in the following logic: // param' = param - step_size * m1o / denom // param_out = param' - original_lr * lambda * param' // then param_out = param - step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom) // so delta = -step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom) const T4 delta = -step_size * m1o / denom - T4(*eta) * lambda * (T4(weights[id]) - step_size * m1o / denom); // Compute the new gradient. if (grads_out) { grads_out[id] = T_GRAD(delta); } // Compute the new weight. if (weights_out) { weights_out[id] = weights[id] + T3(delta); if (fp16_weights_out) { fp16_weights_out[id] = static_cast<half>(weights_out[id]); } } moment_1_out[id] = m1o; moment_2_out[id] = m2o; } template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM> void AdamOptimizerImpl( const T1* eta, const T2 update_count, const T3* weights, const T_GRAD* grads, const T4* moment_1, const T4* moment_2, const T3* loss_scale, const T_GRAD_NORM* grad_norm, const T4 alpha, const T4 beta, const T4 lambda, const T4 epsilon, const bool do_bias_correction, const int64_t weight_decay_mode, T4* moment_1_out, T4* moment_2_out, T3* weights_out, T_GRAD* grads_out, half* fp16_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); // If bias correction coefficients are set to 1s, it's equivalent to disabling bias correction. const T4 alpha_correction = do_bias_correction ? onnxruntime::contrib::compute_bias_correction_coefficient(alpha, update_count) : T4(1.f); const T4 beta_correction = do_bias_correction ? onnxruntime::contrib::compute_bias_correction_coefficient(beta, update_count) : T4(1.f); // Currently two modes of Adamw are supported: // Mode 0: Pytorch https://pytorch.org/docs/stable/_modules/torch/optim/adamw.html#AdamW, // bias correction is applied on m and v individually, // weight decay is applied before weight is updated. // Mode 1: Huggingface https://huggingface.co/transformers/_modules/transformers/optimization.html#AdamW., // bias correction is applied on learning rate, // weight decay is applied after weight is updated. 
if (weight_decay_mode == 0) { hipLaunchKernelGGL(( _AdamOptimizer_mode0<T1, T3, T4, T_GRAD, T_GRAD_NORM>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, eta, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, alpha_correction, beta_correction, moment_1_out, moment_2_out, weights_out, grads_out, fp16_weights_out, N); } else if (weight_decay_mode == 1) { hipLaunchKernelGGL(( _AdamOptimizer_mode1<T1, T3, T4, T_GRAD, T_GRAD_NORM>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, eta, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, alpha_correction, beta_correction, moment_1_out, moment_2_out, weights_out, grads_out, fp16_weights_out, N); } else { // Shouldn't reach here ORT_THROW("Unsupported Adamw optimizer mode."); } } #define SPECIALIZED_AdamOptimizerImpl(T1, T2, T3, T4, T_GRAD, T_GRAD_NORM) \ template void AdamOptimizerImpl( \ const T1* eta, \ const T2 update_count, \ const T3* weights, \ const T_GRAD* grads, \ const T4* moment_1, \ const T4* moment_2, \ const T3* loss_scale, \ const T_GRAD_NORM* grad_norm, \ const T4 alpha, \ const T4 beta, \ const T4 lambda, \ const T4 epsilon, \ const bool do_bias_correction, \ const int64_t weight_decay_mode, \ T4* moment_1_out, \ T4* moment_2_out, \ T3* weights_out, \ T_GRAD* grads_out, \ half* fp16_weights_out, \ size_t count); SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, float, float) SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, float, float) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, float, float) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, half) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, float) SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, half) SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, float) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, half) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, float) } // namespace cuda } // namespace onnxruntime
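Both kernels divide by alpha_correction and beta_correction, which AdamOptimizerImpl obtains from compute_bias_correction_coefficient or pins to 1 when bias correction is disabled. That helper is not part of this file; assuming it returns the standard Adam term 1 - decay^t at update t (an assumption, not something this source shows), the coefficients reduce to:

#include <cmath>
#include <cstdint>

// Assumed form of the bias-correction coefficient (hypothetical stand-in for
// onnxruntime::contrib::compute_bias_correction_coefficient): 1 - decay^t.
inline float assumed_bias_correction(float decay, int64_t update_count) {
  return 1.0f - std::pow(decay, static_cast<float>(update_count));
}

With do_bias_correction == false the kernels receive 1.0f, so the divisions by alpha_correction and beta_correction become no-ops, matching the comment above.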
8e986ded7223d54e8176c24922181b9e6c074109.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "orttraining/training_ops/cuda/optimizer/common.cuh" #include "orttraining/training_ops/cuda/optimizer/adam.h" #include "orttraining/training_ops/cuda/optimizer/common.h" namespace onnxruntime { namespace cuda { template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM> __global__ void _AdamOptimizer_mode0( const T1* eta, const T3* weights, const T_GRAD* grads, const T4* moment_1, const T4* moment_2, const T3* loss_scale, const T_GRAD_NORM* grad_norm, const T4 alpha, const T4 beta, const T4 lambda, const T4 epsilon, const T4 alpha_correction, const T4 beta_correction, T4* moment_1_out, T4* moment_2_out, T3* weights_out, T_GRAD* grads_out, half* fp16_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm); // Gradient scaling/clipping. const T4 g = T4(grads[id]) / actual_scale; // A shared constant. const T4 one = T4(1.0f); // Compute exponentially-averaged historical gradient. const T4 m1o = alpha * moment_1[id] + (one - alpha) * g; const T4 m1o_corrected = m1o / alpha_correction; // Compute exponentially-averaged historical squared gradient. const T4 m2o = beta * moment_2[id] + (one - beta) * g * g; const T4 m2o_corrected = m2o / beta_correction; // Compute weight update. const T4 denom = _Sqrt(m2o_corrected) + epsilon; const T4 update = (m1o_corrected / denom) + (lambda * T4(weights[id])); const T4 delta = -T4(*eta) * update; // Compute the new gradient. if (grads_out) { grads_out[id] = T_GRAD(delta); } // Compute the new weight. if (weights_out) { weights_out[id] = weights[id] + T3(delta); if (fp16_weights_out) { fp16_weights_out[id] = static_cast<half>(weights_out[id]); } } moment_1_out[id] = m1o; moment_2_out[id] = m2o; } template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM> __global__ void _AdamOptimizer_mode1( const T1* eta, const T3* weights, const T_GRAD* grads, const T4* moment_1, const T4* moment_2, const T3* loss_scale, const T_GRAD_NORM* grad_norm, const T4 alpha, const T4 beta, const T4 lambda, const T4 epsilon, const T4 alpha_correction, const T4 beta_correction, T4* moment_1_out, T4* moment_2_out, T3* weights_out, T_GRAD* grads_out, half* fp16_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm); // Gradient scaling/clipping. const T4 g = T4(grads[id]) / actual_scale; // A shared constant. const T4 one = T4(1.0f); // Compute exponentially-averaged historical gradient. const T4 m1o = alpha * moment_1[id] + (one - alpha) * g; // Compute exponentially-averaged historical squared gradient. 
const T4 m2o = beta * moment_2[id] + (one - beta) * g * g; const T4 denom = _Sqrt(m2o) + epsilon; // Apply bias correction terms on learning rate const T4 step_size = T4(*eta) * _Sqrt(beta_correction) / alpha_correction; // Huggingface updates weights in the following logic: // param' = param - step_size * m1o / denom // param_out = param' - original_lr * lambda * param' // then param_out = param - step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom) // so delta = -step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom) const T4 delta = -step_size * m1o / denom - T4(*eta) * lambda * (T4(weights[id]) - step_size * m1o / denom); // Compute the new gradient. if (grads_out) { grads_out[id] = T_GRAD(delta); } // Compute the new weight. if (weights_out) { weights_out[id] = weights[id] + T3(delta); if (fp16_weights_out) { fp16_weights_out[id] = static_cast<half>(weights_out[id]); } } moment_1_out[id] = m1o; moment_2_out[id] = m2o; } template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM> void AdamOptimizerImpl( const T1* eta, const T2 update_count, const T3* weights, const T_GRAD* grads, const T4* moment_1, const T4* moment_2, const T3* loss_scale, const T_GRAD_NORM* grad_norm, const T4 alpha, const T4 beta, const T4 lambda, const T4 epsilon, const bool do_bias_correction, const int64_t weight_decay_mode, T4* moment_1_out, T4* moment_2_out, T3* weights_out, T_GRAD* grads_out, half* fp16_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); // If bias correction coefficients are set to 1s, it's equivalent to disabling bias correction. const T4 alpha_correction = do_bias_correction ? onnxruntime::contrib::compute_bias_correction_coefficient(alpha, update_count) : T4(1.f); const T4 beta_correction = do_bias_correction ? onnxruntime::contrib::compute_bias_correction_coefficient(beta, update_count) : T4(1.f); // Currently two modes of Adamw are supported: // Mode 0: Pytorch https://pytorch.org/docs/stable/_modules/torch/optim/adamw.html#AdamW, // bias correction is applied on m and v individually, // weight decay is applied before weight is updated. // Mode 1: Huggingface https://huggingface.co/transformers/_modules/transformers/optimization.html#AdamW., // bias correction is applied on learning rate, // weight decay is applied after weight is updated. 
if (weight_decay_mode == 0) { _AdamOptimizer_mode0<T1, T3, T4, T_GRAD, T_GRAD_NORM><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( eta, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, alpha_correction, beta_correction, moment_1_out, moment_2_out, weights_out, grads_out, fp16_weights_out, N); } else if (weight_decay_mode == 1) { _AdamOptimizer_mode1<T1, T3, T4, T_GRAD, T_GRAD_NORM><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( eta, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, alpha_correction, beta_correction, moment_1_out, moment_2_out, weights_out, grads_out, fp16_weights_out, N); } else { // Shouldn't reach here ORT_THROW("Unsupported Adamw optimizer mode."); } } #define SPECIALIZED_AdamOptimizerImpl(T1, T2, T3, T4, T_GRAD, T_GRAD_NORM) \ template void AdamOptimizerImpl( \ const T1* eta, \ const T2 update_count, \ const T3* weights, \ const T_GRAD* grads, \ const T4* moment_1, \ const T4* moment_2, \ const T3* loss_scale, \ const T_GRAD_NORM* grad_norm, \ const T4 alpha, \ const T4 beta, \ const T4 lambda, \ const T4 epsilon, \ const bool do_bias_correction, \ const int64_t weight_decay_mode, \ T4* moment_1_out, \ T4* moment_2_out, \ T3* weights_out, \ T_GRAD* grads_out, \ half* fp16_weights_out, \ size_t count); SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, float, float) SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, float, float) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, float, float) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, half) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, float) SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, half) SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, float) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, half) SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, float) } // namespace cuda } // namespace onnxruntime
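The two kernels differ only in where bias correction and weight decay enter the update, which is easy to miss inside the templated CUDA code. A scalar restatement of the two deltas, copied from the arithmetic above (plain host floats; m1o and m2o are the already-updated moments):

#include <cmath>

// Mode 0 (PyTorch-style, as in _AdamOptimizer_mode0): bias-correct m and v,
// add decoupled weight decay inside the update, scale by -eta.
float delta_mode0(float eta, float w, float m1o, float m2o,
                  float alpha_corr, float beta_corr,
                  float lambda, float epsilon) {
  const float m1c = m1o / alpha_corr;
  const float m2c = m2o / beta_corr;
  const float denom = std::sqrt(m2c) + epsilon;
  return -eta * (m1c / denom + lambda * w);
}

// Mode 1 (Huggingface-style, as in _AdamOptimizer_mode1): fold bias correction
// into the step size, apply weight decay to the weight after the Adam step.
float delta_mode1(float eta, float w, float m1o, float m2o,
                  float alpha_corr, float beta_corr,
                  float lambda, float epsilon) {
  const float denom = std::sqrt(m2o) + epsilon;
  const float step_size = eta * std::sqrt(beta_corr) / alpha_corr;
  return -step_size * m1o / denom
         - eta * lambda * (w - step_size * m1o / denom);
}

Mode 0 corrects the moments and adds lambda * w inside the step; mode 1 folds the corrections into step_size and decays the weight after the step, which is why its delta carries the (w - step_size * m1o / denom) term.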
4b1a8900ca4b842393811e26ba55becc28caea83.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include "roctracer/roctx.h" #define BLOCK_DIM 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param C matrix C as provided to device //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_kernel_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col]; C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void sgemm_gpu_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); hipLaunchKernelGGL(( sgemm_kernel_A), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, N, M, K, alpha, beta); } __global__ void sgemm_kernel_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col]; C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void sgemm_gpu_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); hipLaunchKernelGGL(( sgemm_kernel_B), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, N, M, K, alpha, beta); } void random_init(float *data, int size) { for (int i = 0; i < size; ++i) { data[i] = (rand() & 0xFF) / (float)RAND_MAX; } } int main() { float *A, *B, *C; float *d_A, *d_B, *d_C; int N, M, K; float alpha = 2.f; float beta = 1.f; int n_iter = 5; N = M = K = 2048; // allocation of linear memory space A = (float *)malloc(N * K * sizeof(float)); B = (float *)malloc(K * M * sizeof(float)); C = (float *)malloc(N * M * sizeof(float)); // allocation of gpu linear memory space roctxRangePushA("Data Initialization"); hipMalloc((void **)&d_A, N * K * sizeof(float)); hipMalloc((void **)&d_B, K * M * sizeof(float)); hipMalloc((void **)&d_C, N * M * sizeof(float)); roctxRangePop(); // initialize randomized values for memory space roctxRangePushA("Data Initialization"); random_init(A, N * K); random_init(B, K * M); random_init(C, N * M); roctxRangePop(); hipProfilerStart(); // copy initial value for gpu memory roctxRangePushA("Data Transfer"); hipMemcpy(d_A, A, N * K * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B, A, K * M * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_C, A, N * M * sizeof(float), hipMemcpyHostToDevice); roctxRangePop(); roctxRangePushA("Kernel Execution"); // do operation roctxRangePushA("Kernel A"); for (int i = 0; i < n_iter; i++) sgemm_gpu_A(d_A, d_B, d_C, N, M, K, alpha, beta); hipDeviceSynchronize(); roctxRangePop(); roctxRangePushA("Kernel B"); for (int i = 0; i < n_iter; i++) 
sgemm_gpu_B(d_A, d_B, d_C, N, M, K, alpha, beta); hipDeviceSynchronize(); roctxRangePop(); roctxRangePop(); hipProfilerStop(); // terminates allocated gpu memory space hipFree(d_A); hipFree(d_B); hipFree(d_C); // terminates allocated memory space free(A); free(B); free(C); return 0; }
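A host reference of C = alpha * A * B + beta * C is handy for checking either backend's output after the copy back. A minimal sketch (not part of the original file; note that the kernels above index B with a pitch of K, which agrees with this reference only because K == M == 2048 in main):

#include <cmath>

// Host reference: row-major A is N x K, B is K x M, C is N x M.
void sgemm_cpu(const float *A, const float *B, float *C,
               int N, int M, int K, float alpha, float beta) {
  for (int row = 0; row < N; ++row) {
    for (int col = 0; col < M; ++col) {
      float sum = 0.f;
      for (int i = 0; i < K; ++i)
        sum += A[row * K + i] * B[i * M + col];
      C[row * M + col] = alpha * sum + beta * C[row * M + col];
    }
  }
}

// Loose element-wise comparison against the result copied back from the device.
bool allclose(const float *x, const float *y, int n, float tol = 1e-3f) {
  for (int i = 0; i < n; ++i)
    if (std::fabs(x[i] - y[i]) > tol * (1.f + std::fabs(y[i]))) return false;
  return true;
}

For the comparison to mean anything the host buffers must match what the device saw; the main() above copies A into d_B and d_C as well as d_A, so either compare against that or copy B and C instead.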
4b1a8900ca4b842393811e26ba55becc28caea83.cu
#include <stdio.h> #include <cuda_profiler_api.h> #include "nvToolsExt.h" #define BLOCK_DIM 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param C matrix C as provided to device //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_kernel_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col]; C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void sgemm_gpu_A(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); sgemm_kernel_A<<<dimGrid, dimBlock>>>(A, B, C, N, M, K, alpha, beta); } __global__ void sgemm_kernel_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col]; C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void sgemm_gpu_B(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); sgemm_kernel_B<<<dimGrid, dimBlock>>>(A, B, C, N, M, K, alpha, beta); } void random_init(float *data, int size) { for (int i = 0; i < size; ++i) { data[i] = (rand() & 0xFF) / (float)RAND_MAX; } } int main() { float *A, *B, *C; float *d_A, *d_B, *d_C; int N, M, K; float alpha = 2.f; float beta = 1.f; int n_iter = 5; N = M = K = 2048; // allocation of linear memory space A = (float *)malloc(N * K * sizeof(float)); B = (float *)malloc(K * M * sizeof(float)); C = (float *)malloc(N * M * sizeof(float)); // allocation of gpu linear memory space nvtxRangePushA("Data Initialization"); cudaMalloc((void **)&d_A, N * K * sizeof(float)); cudaMalloc((void **)&d_B, K * M * sizeof(float)); cudaMalloc((void **)&d_C, N * M * sizeof(float)); nvtxRangePop(); // initialize randomized values for memory space nvtxRangePushA("Data Initialization"); random_init(A, N * K); random_init(B, K * M); random_init(C, N * M); nvtxRangePop(); cudaProfilerStart(); // copy initial value for gpu memory nvtxRangePushA("Data Transfer"); cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B, A, K * M * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_C, A, N * M * sizeof(float), cudaMemcpyHostToDevice); nvtxRangePop(); nvtxRangePushA("Kernel Execution"); // do operation nvtxRangePushA("Kernel A"); for (int i = 0; i < n_iter; i++) sgemm_gpu_A(d_A, d_B, d_C, N, M, K, alpha, beta); cudaDeviceSynchronize(); nvtxRangePop(); nvtxRangePushA("Kernel B"); for (int i = 0; i < n_iter; i++) sgemm_gpu_B(d_A, d_B, d_C, N, M, K, alpha, beta); cudaDeviceSynchronize(); nvtxRangePop(); nvtxRangePop(); cudaProfilerStop(); // terminates 
allocated gpu memory space cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // terminates allocated memory space free(A); free(B); free(C); return 0; }
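sgemm_gpu_A and sgemm_gpu_B size the grid with integer division, which only covers the full output when N and M are multiples of BLOCK_DIM (true for the 2048 case driven by main). For arbitrary sizes the usual pattern is a ceil-divided grid plus an in-kernel bounds check; a sketch under that assumption (hypothetical kernel name, same argument layout as above):

__global__ void sgemm_kernel_guarded(const float *A, const float *B, float *C,
                                     int N, int M, int K, float alpha, float beta) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= N || col >= M) return;  // guard the ragged edge blocks

  float sum = 0.f;
  for (int i = 0; i < K; ++i)
    sum += A[row * K + i] * B[i * M + col];  // B indexed with its row pitch M
  C[row * M + col] = alpha * sum + beta * C[row * M + col];
}

void sgemm_gpu_guarded(const float *A, const float *B, float *C,
                       int N, int M, int K, float alpha, float beta) {
  constexpr int kBlockDim = 16;
  dim3 block(kBlockDim, kBlockDim);
  dim3 grid((M + block.x - 1) / block.x,  // ceil-divide so the last partial tile is launched
            (N + block.y - 1) / block.y);
  sgemm_kernel_guarded<<<grid, block>>>(A, B, C, N, M, K, alpha, beta);
}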
c050ec8f583a00459c1b58fd5612fa1a4830cf99.hip
// !!! This is a file automatically generated by hipify!!! // includes system #include <sstream> // std::ostringstream #include <cstdio> // include CUDA #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // include project #include "integrator_exception.h" #include "rkn76.h" #include "util.h" #define THREADS_PER_BLOCK 256 static hipError_t HandleError(hipError_t cudaStatus, const char *file, int line) { if (hipSuccess != cudaStatus) { printf( "%s in %s at line %d\n", hipGetErrorString( cudaStatus ), file, line ); return cudaStatus; } return cudaStatus; } #define HANDLE_ERROR(cudaStatus) (HandleError(cudaStatus, __FILE__, __LINE__)) #define LAMBDA 1.0/20.0 #define sQ sqrt(21.0) ttt_t rkn76::c[] = { 0.0, 1.0/10.0, 1.0/5.0, 3.0/8.0, 1.0/2.0, (7.0-sQ)/14.0, (7.0+sQ)/14.0, 1.0, 1.0 }; var_t rkn76::a[] = { 1.0/200.0, 1.0/150.0, 1.0/75.0, 171.0/8192.0, 45.0/4096.0, 315.0/8192.0, 5.0/288.0, 25.0/528.0, 25.0/672.0, 16.0/693.0, (1003.0-205.0*sQ)/12348.0,-25.0*(751.0-173.0*sQ)/90552.0, 25.0*(624.0-137.0*sQ)/43218.0, -128.0*(361.0-79.0*sQ)/237699.0, (3411.0-745.0*sQ)/24696.0, (793.0+187.0*sQ)/12348.0, -25.0*(331.0+113.0*sQ)/90552.0, 25.0*(1044.0+247.0*sQ)/43218.0, -128.0*(14885.0+3779.0*sQ)/9745659.0, (3327.0+797.0*sQ)/24696.0, -(581.0+127.0*sQ)/1722.0, -(157.0-3.0*sQ)/378.0, 25.0*(143.0-10.0*sQ)/2772.0, -25.0*(876.0+55.0*sQ)/3969.0, 1280.0*(913.0+18.0*sQ)/596673.0, -(1353.0+26.0*sQ)/2268.0, 7.0*(1777.0+377.0*sQ)/4428.0, 7.0*(5.0-sQ)/36.0, 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0 }; var_t rkn76::bh[]= { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0, 0.0 }; var_t rkn76::b[] = { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, -LAMBDA, LAMBDA }; #undef sQ // ytemp = y_n + dt*(a21*f1) static __global__ void calc_ytemp_for_f2_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, var_t f1f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid]; tid += stride; } } // ytemp = y_n + dt*(a31*f1 + a32*f2) static __global__ void calc_ytemp_for_f3_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, var_t f1f, var_t f2f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid]; tid += stride; } } // ytemp = y_n + dt*(a41*f1 + a42*f2 + a43*f3) static __global__ void calc_ytemp_for_f4_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, var_t f1f, var_t f2f, var_t f3f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid]; tid += stride; } } // ytemp = y_n + dt*(a51*f1 + a52*f2 + a53*f3 + a54*f4) static __global__ void calc_ytemp_for_f5_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, var_t f1f, var_t f2f, var_t f3f, var_t f4f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid]; tid += stride; } } // ytemp = y_n + dt*(a61*f1 + a62*f2 + a63*f3 + a64*f4 + a65*f5) static __global__ void calc_ytemp_for_f6_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const 
var_t *f4, const var_t *f5, var_t f1f, var_t f2f, var_t f3f, var_t f4f, var_t f5f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid] + f5f * f5[tid]; tid += stride; } } // ytemp = y_n + dt*(a71*f1 + a72*f2 + a73*f3 + a74*f4 + a75*f5 + a76*f6) static __global__ void calc_ytemp_for_f7_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, var_t f1f, var_t f2f, var_t f3f, var_t f4f, var_t f5f, var_t f6f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid] + f5f * f5[tid] + f6f * f6[tid]; tid += stride; } } // ytemp = y_n + dt*(a81*f1 + a82*f2 + a83*f3 + a84*f4 + a85*f5 + a86*f6 + a87*f7) static __global__ void calc_ytemp_for_f8_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, var_t f1f, var_t f2f, var_t f3f, var_t f4f, var_t f5f, var_t f6f, var_t f7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid] + f5f * f5[tid] + f6f * f6[tid] + f7f * f7[tid]; tid += stride; } } // ytemp = y_n + dt*(a91*f1 + a95*f5 + a96*f6 + a97*f7) static __global__ void calc_ytemp_for_f9_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f5, const var_t *f6, const var_t *f7, var_t f1f, var_t f5f, var_t f6f, var_t f7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f5f * f5[tid] + f6f * f6[tid] + f7f * f7[tid]; tid += stride; } } // y = y_n + dt*(bh1*f1 + bh5*f5 + bh6*f6 + bh7*f7) static __global__ void calc_y_kernel(int_t n, var_t *y, const var_t *y_n, const var_t *f1, const var_t *f5, const var_t *f6, const var_t *f7, var_t f1f, var_t f5f, var_t f6f, var_t f7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { y[tid] = y_n[tid] + f1f * f1[tid] + f5f * f5[tid] + f6f * f6[tid] + f7f * f7[tid]; tid += stride; } } static __global__ void calc_f8_sub_f9_kernel(int_t n, var_t* result, const var_t* f8, const var_t* f9) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { result[tid] = f8[tid] - f9[tid]; tid += stride; } } void rkn76::call_calc_f8_sub_f9_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); var_t *err = d_err[i].data().get(); var_t* f8 = d_f[i][7].data().get(); var_t* f9 = d_f[i][8].data().get(); calculate_grid(n, THREADS_PER_BLOCK); hipLaunchKernelGGL(( calc_f8_sub_f9_kernel), dim3(grid), dim3(block), 0, 0, n, err, f8, f9); hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw integrator_exception("calc_f8_sub_f9_kernel failed"); } } } void rkn76::call_calc_ytemp_for_fr_kernel(int r) { int idx = 0; for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t* f1 = d_f[i][0].data().get(); var_t* f2 = d_f[i][1].data().get(); var_t* f3 = d_f[i][2].data().get(); var_t* f4 = d_f[i][3].data().get(); var_t* f5 = 
d_f[i][4].data().get(); var_t* f6 = d_f[i][5].data().get(); var_t* f7 = d_f[i][6].data().get(); switch (r) { case 1: idx = 0; hipLaunchKernelGGL(( calc_ytemp_for_f2_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, a[idx]*dt_try); break; case 2: idx = 1; hipLaunchKernelGGL(( calc_ytemp_for_f3_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f2, a[idx]*dt_try, a[idx+1]*dt_try); break; case 3: idx = 3; hipLaunchKernelGGL(( calc_ytemp_for_f4_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f2, f3, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try); break; case 4: idx = 6; hipLaunchKernelGGL(( calc_ytemp_for_f5_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try); break; case 5: idx = 10; hipLaunchKernelGGL(( calc_ytemp_for_f6_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, f5, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try); break; case 6: idx = 15; hipLaunchKernelGGL(( calc_ytemp_for_f7_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, f5, f6, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try); break; case 7: idx = 21; hipLaunchKernelGGL(( calc_ytemp_for_f8_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, f5, f6, f7, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; case 8: idx = 28; hipLaunchKernelGGL(( calc_ytemp_for_f9_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, f1, f5, f6, f7, a[idx]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; default: ostringstream msg("call_calc_ytemp_for_fr_kernel() function was called with invalid parameter: ", ostringstream::ate); msg << r+1 << "!"; throw integrator_exception(msg.str()); } hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { ostringstream msg("calc_ytemp_for_f", ostringstream::ate); msg << r+1 << "_kernel failed"; throw integrator_exception(msg.str()); } } } void rkn76::call_calc_y_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t *y = f.d_yout[i].data().get(); var_t* f1 = d_f[i][0].data().get(); var_t* f5 = d_f[i][4].data().get(); var_t* f6 = d_f[i][5].data().get(); var_t* f7 = d_f[i][6].data().get(); hipLaunchKernelGGL(( calc_y_kernel), dim3(grid), dim3(block), 0, 0, n, y, y_n, f1, f5, f6, f7, b[0]*dt_try, b[4]*dt_try, b[5]*dt_try, b[6]*dt_try); hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw integrator_exception("calc_y_kernel failed"); } } } rkn76::rkn76(ode& f, ttt_t dt, bool adaptive, var_t tolerance) : integrator(f, dt), adaptive(adaptive), tolerance(tolerance), d_f(f.get_order()), d_ytemp(f.get_order(), d_var_t()), d_err(f.get_order(), d_var_t()) { RKOrder = 7; r_max = adaptive ? 
RKOrder + 2 : RKOrder; int forder = f.get_order(); for (int i = 0; i < forder; i++) { int size = f.d_y[i].size(); d_ytemp[i].resize(size); if (adaptive) { d_err[i].resize(size); } d_f[i].resize(r_max); for (int r = 0; r < r_max; r++) { d_f[i][r].resize(size); } } } void rkn76::calculate_grid(int nData, int threads_per_block) { int nThread = ::min(threads_per_block, nData); int nBlock = (nData + nThread - 1)/nThread; grid.x = nBlock; block.x = nThread; } ttt_t rkn76::step() { int forder = f.get_order(); int r = 0; // Calculate f1 = f(tn, yn) = d_f[][0] ttt_t ttemp = f.t + c[r] * dt; for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, f.d_y, d_f[i][r]); } dt_try = dt; var_t max_err = 0.0; int iter = 0; do { dt_did = dt_try; // Calculate f2 = f(tn + c2 * dt, yn + a21 * dt * f1) = d_f[][1] // Calculate f3 = f(tn + c3 * dt, yn + a31 * dt * f1 + ...) = d_f[][2] // Calculate f4 = f(tn + c4 * dt, yn + a41 * dt * f1 + ...) = d_f[][3] // ... // Calculate f7 = f(tn + c7 * dt, yn + a71 * dt * f1 + ...) = d_f[][6] for (r = 1; r < RKOrder; r++) { ttemp = f.t + c[r] * dt_try; call_calc_ytemp_for_fr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, d_ytemp, d_f[i][r]); } } if (adaptive) { // Calculate f8 = f(tn + c8 * dt, yn + a81 * dt * f1 + ...) = d_f[][7] // Calculate f9 = f(tn + c9 * dt, yn + a91 * dt * f1 + ...) = d_f[][8] for (r = RKOrder; r < r_max; r++) { ttemp = f.t + c[r] * dt_try; call_calc_ytemp_for_fr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, r == r_max - 1 ? f.d_yout : d_ytemp, d_f[i][r]); } } // calculate d_err = f8 - f9 call_calc_f8_sub_f9_kernel(); max_err = fabs(dt_try*LAMBDA*::max(max_vec(d_err[0]), max_vec(d_err[1]))); dt_try *= 0.9 * pow(tolerance / max_err, 1.0/8.0); } else { call_calc_y_kernel(); } iter++; } while(adaptive && max_err > tolerance); if (adaptive) { update_counters(iter); } // Set the next step size dt = dt_try; f.tout = f.t + dt_did; f.swap_in_out(); return dt_did; } string rkn76::get_name() { return adaptive ? "a_optRungeKuttaNystrom76" : "optRungeKuttaNystrom76"; } #undef LAMBDA
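The adaptive branch of step() accepts or retries a step purely from the embedded estimate |dt * LAMBDA * max(f8 - f9)| and rescales dt by 0.9 * (tolerance / err)^(1/8). A scalar sketch of that controller (host-only; attempt_step stands in for evaluating all stages at the trial step size and returning max|f8 - f9|):

#include <cmath>
#include <functional>

// Sketch of the rkn76 step-size controller: retry with a rescaled dt until the
// embedded error estimate passes, then keep the rescaled dt for the next step.
double adaptive_step(std::function<double(double)> attempt_step,
                     double &dt, double lambda, double tolerance) {
  double dt_try = dt;
  double dt_did = dt_try;
  double max_err = 0.0;
  do {
    dt_did = dt_try;
    const double stage_err = attempt_step(dt_try);             // max|f8 - f9| at dt_try
    max_err = std::fabs(dt_try * lambda * stage_err);          // err = |dt * LAMBDA * (f8 - f9)|
    dt_try *= 0.9 * std::pow(tolerance / max_err, 1.0 / 8.0);  // rescale with the 1/8 exponent used above
  } while (max_err > tolerance);
  dt = dt_try;    // next step starts from the rescaled dt
  return dt_did;  // the step size that was actually taken
}

The 1/8 exponent is the usual 1/(p+1) choice for a 7th-order method, and 0.9 is the customary safety factor.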
c050ec8f583a00459c1b58fd5612fa1a4830cf99.cu
// includes system #include <sstream> // std::ostringstream #include <cstdio> // include CUDA #include "cuda_runtime.h" #include "device_launch_parameters.h" // include project #include "integrator_exception.h" #include "rkn76.h" #include "util.h" #define THREADS_PER_BLOCK 256 static cudaError_t HandleError(cudaError_t cudaStatus, const char *file, int line) { if (cudaSuccess != cudaStatus) { printf( "%s in %s at line %d\n", cudaGetErrorString( cudaStatus ), file, line ); return cudaStatus; } return cudaStatus; } #define HANDLE_ERROR(cudaStatus) (HandleError(cudaStatus, __FILE__, __LINE__)) #define LAMBDA 1.0/20.0 #define sQ sqrt(21.0) ttt_t rkn76::c[] = { 0.0, 1.0/10.0, 1.0/5.0, 3.0/8.0, 1.0/2.0, (7.0-sQ)/14.0, (7.0+sQ)/14.0, 1.0, 1.0 }; var_t rkn76::a[] = { 1.0/200.0, 1.0/150.0, 1.0/75.0, 171.0/8192.0, 45.0/4096.0, 315.0/8192.0, 5.0/288.0, 25.0/528.0, 25.0/672.0, 16.0/693.0, (1003.0-205.0*sQ)/12348.0,-25.0*(751.0-173.0*sQ)/90552.0, 25.0*(624.0-137.0*sQ)/43218.0, -128.0*(361.0-79.0*sQ)/237699.0, (3411.0-745.0*sQ)/24696.0, (793.0+187.0*sQ)/12348.0, -25.0*(331.0+113.0*sQ)/90552.0, 25.0*(1044.0+247.0*sQ)/43218.0, -128.0*(14885.0+3779.0*sQ)/9745659.0, (3327.0+797.0*sQ)/24696.0, -(581.0+127.0*sQ)/1722.0, -(157.0-3.0*sQ)/378.0, 25.0*(143.0-10.0*sQ)/2772.0, -25.0*(876.0+55.0*sQ)/3969.0, 1280.0*(913.0+18.0*sQ)/596673.0, -(1353.0+26.0*sQ)/2268.0, 7.0*(1777.0+377.0*sQ)/4428.0, 7.0*(5.0-sQ)/36.0, 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0 }; var_t rkn76::bh[]= { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0, 0.0 }; var_t rkn76::b[] = { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, -LAMBDA, LAMBDA }; #undef sQ // ytemp = y_n + dt*(a21*f1) static __global__ void calc_ytemp_for_f2_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, var_t f1f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid]; tid += stride; } } // ytemp = y_n + dt*(a31*f1 + a32*f2) static __global__ void calc_ytemp_for_f3_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, var_t f1f, var_t f2f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid]; tid += stride; } } // ytemp = y_n + dt*(a41*f1 + a42*f2 + a43*f3) static __global__ void calc_ytemp_for_f4_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, var_t f1f, var_t f2f, var_t f3f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid]; tid += stride; } } // ytemp = y_n + dt*(a51*f1 + a52*f2 + a53*f3 + a54*f4) static __global__ void calc_ytemp_for_f5_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, var_t f1f, var_t f2f, var_t f3f, var_t f4f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid]; tid += stride; } } // ytemp = y_n + dt*(a61*f1 + a62*f2 + a63*f3 + a64*f4 + a65*f5) static __global__ void calc_ytemp_for_f6_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, const var_t *f5, var_t f1f, var_t f2f, var_t f3f, 
var_t f4f, var_t f5f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid] + f5f * f5[tid]; tid += stride; } } // ytemp = y_n + dt*(a71*f1 + a72*f2 + a73*f3 + a74*f4 + a75*f5 + a76*f6) static __global__ void calc_ytemp_for_f7_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, var_t f1f, var_t f2f, var_t f3f, var_t f4f, var_t f5f, var_t f6f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid] + f5f * f5[tid] + f6f * f6[tid]; tid += stride; } } // ytemp = y_n + dt*(a81*f1 + a82*f2 + a83*f3 + a84*f4 + a85*f5 + a86*f6 + a87*f7) static __global__ void calc_ytemp_for_f8_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f2, const var_t *f3, const var_t *f4, const var_t *f5, const var_t *f6, const var_t *f7, var_t f1f, var_t f2f, var_t f3f, var_t f4f, var_t f5f, var_t f6f, var_t f7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f2f * f2[tid] + f3f * f3[tid] + f4f * f4[tid] + f5f * f5[tid] + f6f * f6[tid] + f7f * f7[tid]; tid += stride; } } // ytemp = y_n + dt*(a91*f1 + a95*f5 + a96*f6 + a97*f7) static __global__ void calc_ytemp_for_f9_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *f1, const var_t *f5, const var_t *f6, const var_t *f7, var_t f1f, var_t f5f, var_t f6f, var_t f7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + f1f * f1[tid] + f5f * f5[tid] + f6f * f6[tid] + f7f * f7[tid]; tid += stride; } } // y = y_n + dt*(bh1*f1 + bh5*f5 + bh6*f6 + bh7*f7) static __global__ void calc_y_kernel(int_t n, var_t *y, const var_t *y_n, const var_t *f1, const var_t *f5, const var_t *f6, const var_t *f7, var_t f1f, var_t f5f, var_t f6f, var_t f7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { y[tid] = y_n[tid] + f1f * f1[tid] + f5f * f5[tid] + f6f * f6[tid] + f7f * f7[tid]; tid += stride; } } static __global__ void calc_f8_sub_f9_kernel(int_t n, var_t* result, const var_t* f8, const var_t* f9) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { result[tid] = f8[tid] - f9[tid]; tid += stride; } } void rkn76::call_calc_f8_sub_f9_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); var_t *err = d_err[i].data().get(); var_t* f8 = d_f[i][7].data().get(); var_t* f9 = d_f[i][8].data().get(); calculate_grid(n, THREADS_PER_BLOCK); calc_f8_sub_f9_kernel<<<grid, block>>>(n, err, f8, f9); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw integrator_exception("calc_f8_sub_f9_kernel failed"); } } } void rkn76::call_calc_ytemp_for_fr_kernel(int r) { int idx = 0; for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t* f1 = d_f[i][0].data().get(); var_t* f2 = d_f[i][1].data().get(); var_t* f3 = d_f[i][2].data().get(); var_t* f4 = d_f[i][3].data().get(); var_t* f5 = d_f[i][4].data().get(); var_t* f6 = d_f[i][5].data().get(); var_t* f7 = d_f[i][6].data().get(); 
switch (r) { case 1: idx = 0; calc_ytemp_for_f2_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, a[idx]*dt_try); break; case 2: idx = 1; calc_ytemp_for_f3_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f2, a[idx]*dt_try, a[idx+1]*dt_try); break; case 3: idx = 3; calc_ytemp_for_f4_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f2, f3, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try); break; case 4: idx = 6; calc_ytemp_for_f5_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try); break; case 5: idx = 10; calc_ytemp_for_f6_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, f5, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try); break; case 6: idx = 15; calc_ytemp_for_f7_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, f5, f6, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try); break; case 7: idx = 21; calc_ytemp_for_f8_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f2, f3, f4, f5, f6, f7, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; case 8: idx = 28; calc_ytemp_for_f9_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, f1, f5, f6, f7, a[idx]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; default: ostringstream msg("call_calc_ytemp_for_fr_kernel() function was called with invalid parameter: ", ostringstream::ate); msg << r+1 << "!"; throw integrator_exception(msg.str()); } cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { ostringstream msg("calc_ytemp_for_f", ostringstream::ate); msg << r+1 << "_kernel failed"; throw integrator_exception(msg.str()); } } } void rkn76::call_calc_y_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t *y = f.d_yout[i].data().get(); var_t* f1 = d_f[i][0].data().get(); var_t* f5 = d_f[i][4].data().get(); var_t* f6 = d_f[i][5].data().get(); var_t* f7 = d_f[i][6].data().get(); calc_y_kernel<<<grid, block>>>(n, y, y_n, f1, f5, f6, f7, b[0]*dt_try, b[4]*dt_try, b[5]*dt_try, b[6]*dt_try); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw integrator_exception("calc_y_kernel failed"); } } } rkn76::rkn76(ode& f, ttt_t dt, bool adaptive, var_t tolerance) : integrator(f, dt), adaptive(adaptive), tolerance(tolerance), d_f(f.get_order()), d_ytemp(f.get_order(), d_var_t()), d_err(f.get_order(), d_var_t()) { RKOrder = 7; r_max = adaptive ? 
RKOrder + 2 : RKOrder; int forder = f.get_order(); for (int i = 0; i < forder; i++) { int size = f.d_y[i].size(); d_ytemp[i].resize(size); if (adaptive) { d_err[i].resize(size); } d_f[i].resize(r_max); for (int r = 0; r < r_max; r++) { d_f[i][r].resize(size); } } } void rkn76::calculate_grid(int nData, int threads_per_block) { int nThread = std::min(threads_per_block, nData); int nBlock = (nData + nThread - 1)/nThread; grid.x = nBlock; block.x = nThread; } ttt_t rkn76::step() { int forder = f.get_order(); int r = 0; // Calculate f1 = f(tn, yn) = d_f[][0] ttt_t ttemp = f.t + c[r] * dt; for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, f.d_y, d_f[i][r]); } dt_try = dt; var_t max_err = 0.0; int iter = 0; do { dt_did = dt_try; // Calculate f2 = f(tn + c2 * dt, yn + a21 * dt * f1) = d_f[][1] // Calculate f3 = f(tn + c3 * dt, yn + a31 * dt * f1 + ...) = d_f[][2] // Calculate f4 = f(tn + c4 * dt, yn + a41 * dt * f1 + ...) = d_f[][3] // ... // Calculate f7 = f(tn + c7 * dt, yn + a71 * dt * f1 + ...) = d_f[][6] for (r = 1; r < RKOrder; r++) { ttemp = f.t + c[r] * dt_try; call_calc_ytemp_for_fr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, d_ytemp, d_f[i][r]); } } if (adaptive) { // Calculate f8 = f(tn + c8 * dt, yn + a81 * dt * f1 + ...) = d_f[][7] // Calculate f9 = f(tn + c9 * dt, yn + a91 * dt * f1 + ...) = d_f[][8] for (r = RKOrder; r < r_max; r++) { ttemp = f.t + c[r] * dt_try; call_calc_ytemp_for_fr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, r == r_max - 1 ? f.d_yout : d_ytemp, d_f[i][r]); } } // calculate d_err = f8 - f9 call_calc_f8_sub_f9_kernel(); max_err = fabs(dt_try*LAMBDA*std::max(max_vec(d_err[0]), max_vec(d_err[1]))); dt_try *= 0.9 * pow(tolerance / max_err, 1.0/8.0); } else { call_calc_y_kernel(); } iter++; } while(adaptive && max_err > tolerance); if (adaptive) { update_counters(iter); } // Set the next step size dt = dt_try; f.tout = f.t + dt_did; f.swap_in_out(); return dt_did; } string rkn76::get_name() { return adaptive ? "a_optRungeKuttaNystrom76" : "optRungeKuttaNystrom76"; } #undef LAMBDA
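The error estimate in step() comes straight from the weight tables defined above: b and bh agree except in the last two entries, where b carries -LAMBDA and +LAMBDA, so the difference between the two embedded solutions collapses to dt * LAMBDA * (f9 - f8). calc_f8_sub_f9_kernel materialises that difference (up to sign) and step() supplies the dt * LAMBDA factor. A small host-only check of the identity (hypothetical helper, double arrays standing in for the var_t tables):

#include <cassert>
#include <cmath>

// With b and bh as defined above, sum_i (b[i] - bh[i]) * f[i] reduces to
// LAMBDA * (f9 - f8) for arbitrary stage values f[0..8].
void check_embedded_difference(const double b[9], const double bh[9],
                               const double f[9], double lambda) {
  double diff = 0.0;
  for (int i = 0; i < 9; ++i) diff += (b[i] - bh[i]) * f[i];
  const double expected = lambda * (f[8] - f[7]);  // f[8] is f9, f[7] is f8
  assert(std::fabs(diff - expected) < 1e-9 * (1.0 + std::fabs(expected)));
}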
809c8ae22e8ee86c8966c5e6ff6fea2072870f3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by chengjin on 2020-06-02. // #include "cu_utils.h" #include "cu_math.h" #include "cu_device.cuh" #include "nms_kernel.h" namespace quake { namespace framework { namespace ops_lib { template<typename T> __device__ inline T devIoU(T const * const a, T const * const b) { T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left + 1, T(0)), height = max(bottom - top + 1, T(0)); T interS = width * height; T Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); T Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); //printf("(float)compare a[%f,%f,%f,%f] with b[%f,%f,%f,%f] --> Sa:%f,Sb:%f --> %f vs %f\n",a[0],a[1],a[2],a[3],b[0],b[1],b[2],b[3],Sa,Sb,interS,Sa + Sb - interS); return interS / (Sa + Sb - interS); } __device__ inline __half devIoUH(__half const * const a, __half const * const b) { __half left = hmax(a[0], b[0]), right = hmin(a[2], b[2]); __half top = hmax(a[1], b[1]), bottom = hmin(a[3], b[3]); __half width = hmax(__hadd(__hsub(right,left),__half(1)),__half(0)); __half height = hmax(__hadd(__hsub(bottom,top),__half(1)),__half(0)); __half width_a=__hadd(__hsub(a[2],a[0]),__half(1)); __half height_a=__hadd(__hsub(a[3],a[1]),__half(1)); __half width_b=__hadd(__hsub(b[2],b[0]),__half(1)); __half height_b=__hadd(__hsub(b[3],b[1]),__half(1)); __half interS = __hmul(width,height); __half Sa = __hmul(width_a,height_a); __half Sb = __hmul(width_b,height_b); __half decrease=__half(1); for(int i=0;i<10;i++){ if(__hge(interS,65535) || __hge(Sa,65535) || __hge(Sb,65535)){ decrease=__hmul(decrease,2); width=__hdiv(width,decrease); height=__hdiv(height,decrease); width_a=__hdiv(width_a,decrease); height_a=__hdiv(height_a,decrease); width_b=__hdiv(width_b,decrease); height_b=__hdiv(height_b,decrease); interS = __hmul(width,height); Sa = __hmul(width_a,height_a); Sb = __hmul(width_b,height_b); }else{ break; } } /* printf("(half)(decrease %f)compare a[%f,%f,%f,%f] with b[%f,%f,%f,%f] --> Sa:%f,Sb:%f --> %f vs %f\n", __half2float(decrease), __half2float(a[0]),__half2float(a[1]),__half2float(a[2]),__half2float(a[3]), __half2float(b[0]),__half2float(b[1]),__half2float(b[2]),__half2float(b[3]), __half2float(Sa),__half2float(Sb), __half2float(interS),__half2float(__hadd(Sa,__hsub(Sb,interS)))); */ return __hdiv(interS,__hadd(Sa,__hsub(Sb,interS))); } template<typename T> __global__ static void _filter_bboxes_by_score(int batchsize,int bbox_num, int bbox_dim,const T* bboxes,int* mask,T score_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; //batchsize*bbox_num int idx_bz=x/bbox_num; int idx_box=x%bbox_num; if (idx_box<bbox_num && idx_bz<batchsize && \ bboxes[idx_bz*bbox_num*bbox_dim+idx_box*bbox_dim+4]<score_thresh) { mask[idx_bz*bbox_num+idx_box]=1; //printf("%d,%d th score %f < %f, set %d\n",idx_bz,idx_box,bboxes[idx_bz*bbox_num*bbox_dim+idx_box*bbox_dim+4],score_thresh,idx_bz*bbox_num+idx_box); } } template<typename T> __global__ static void _filter_bboxes_by_class_score(int batchsize,int bbox_num, int class_num,const T* scores,int* mask,T score_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; //batchsize*class_num int idx_box = blockIdx.y * blockDim.y + threadIdx.y; //bbox_num int idx_bz=x/class_num; int idx_cls=x%class_num; //mask shape [bz,class_num,bbox_num] //score shape [bz,bbox_num,class_num] int score_index=idx_bz*class_num*bbox_num+idx_box*class_num+idx_cls; if (idx_box<bbox_num && idx_bz<batchsize && idx_cls<class_num && \ 
scores[score_index]<score_thresh) { mask[idx_bz*class_num*bbox_num+idx_cls*bbox_num+idx_box]=1; } } template<typename T> __global__ static void _fast_nms(int batchsize,int bbox_num,int bbox_dim, const T* bboxes,int* mask,T overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self int self_id = blockIdx.y * blockDim.y + threadIdx.y; // id of other bboxes int bz_id=x/bbox_num; int compare_id=x%bbox_num; int compare_box=bz_id*bbox_num*bbox_dim+compare_id*bbox_dim; int self_box=bz_id*bbox_num*bbox_dim+self_id*bbox_dim; if(bz_id<batchsize && self_id<bbox_num && mask[bz_id*bbox_num+self_id]==0 && \ compare_id<self_id && mask[bz_id*bbox_num+compare_id]==0){ if (devIoU(&bboxes[compare_box],&bboxes[self_box]) > overlap_thresh){ mask[bz_id*bbox_num+self_id]=1; //printf("(float) %d,%d supressed by %d with float %f \n",bz_id,self_id,compare_id,devIoU(&bboxes[compare_box],&bboxes[self_box])); } } } __global__ static void _fast_nmsH(int batchsize,int bbox_num,int bbox_dim, const __half* bboxes,int* mask,__half overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self int self_id = blockIdx.y * blockDim.y + threadIdx.y; // id of other bboxes int bz_id=x/bbox_num; int compare_id=x%bbox_num; int compare_box=bz_id*bbox_num*bbox_dim+compare_id*bbox_dim; int self_box=bz_id*bbox_num*bbox_dim+self_id*bbox_dim; if(bz_id<batchsize && self_id<bbox_num && mask[bz_id*bbox_num+self_id]==0 && \ compare_id<self_id && mask[bz_id*bbox_num+compare_id]==0){ __half overlap=devIoUH(&bboxes[compare_box],&bboxes[self_box]); if (__hgt(overlap,overlap_thresh)){ mask[bz_id*bbox_num+self_id]=1; //printf("(__half) %d supressed by %d with float %f \n",self_id,compare_id,devIoU(&bboxes[compare_box],&bboxes[self_box])); } } } template<typename T> __global__ static void _fast_class_nms(int batchsize,int bbox_num,int class_num,int bbox_dim, const T* bboxes,int* mask,T overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self, batchsize*bbox_num int y = blockIdx.y * blockDim.y + threadIdx.y; // class_num*bbox_num //define idx int cls_idx=y/bbox_num; int self_idx = y%bbox_num; // id of other bboxes int bz_idx=x/bbox_num; int compare_idx=x%bbox_num; //bboxes shape [bz,bbox_nums,class_num,bbox_dim] //mask shape [bz,class_num,bbox_num] int compare_box=bz_idx*bbox_num*class_num*bbox_dim+compare_idx*class_num*bbox_dim+cls_idx*bbox_dim; int self_box=bz_idx*bbox_num*class_num*bbox_dim+self_idx*class_num*bbox_dim+cls_idx*bbox_dim; int self_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+self_idx; int compare_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+compare_idx; //do nms if(bz_idx<batchsize && cls_idx<class_num && self_idx<bbox_num && \ mask[self_mask_index]==0 && compare_idx<self_idx && mask[compare_mask_index]==0){ if (devIoU(&bboxes[compare_box],&bboxes[self_box]) > overlap_thresh){ mask[self_mask_index]=1; } } } template<typename T> __global__ static void _fast_class_nmsH(int batchsize,int bbox_num,int class_num,int bbox_dim, const T* bboxes,int* mask,T overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self, batchsize*bbox_num int y = blockIdx.y * blockDim.y + threadIdx.y; // class_num*bbox_num //define idx int cls_idx=y/bbox_num; int self_idx = y%bbox_num; // id of other bboxes int bz_idx=x/bbox_num; int compare_idx=x%bbox_num; //bboxes shape [bz,bbox_nums,class_num,bbox_dim] //mask shape [bz,class_num,bbox_num] int compare_box=bz_idx*bbox_num*class_num*bbox_dim+compare_idx*class_num*bbox_dim+cls_idx*bbox_dim; int 
self_box=bz_idx*bbox_num*class_num*bbox_dim+self_idx*class_num*bbox_dim+cls_idx*bbox_dim; int self_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+self_idx; int compare_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+compare_idx; //do nms if(bz_idx<batchsize && cls_idx<class_num && self_idx<bbox_num && \ mask[self_mask_index]==0 && compare_idx<self_idx && mask[compare_mask_index]==0){ __half overlap=devIoUH(&bboxes[compare_box],&bboxes[self_box]); if (__hgt(overlap,overlap_thresh)){ mask[self_mask_index]=1; } } } __global__ static void _sum_boxes(int batchsize,int boxes_num,int max_to_keep, const int* mask,int* index,int* int_num_outs){ int box_idx = blockIdx.x * blockDim.x + threadIdx.x; // boxes_num int bz_idx = blockIdx.y * blockDim.y + threadIdx.y; // batchsize if (box_idx<boxes_num && bz_idx<batchsize && mask[bz_idx*boxes_num+box_idx]==0) { const int boxId = atomicAdd(&int_num_outs[bz_idx],1); if (boxId < max_to_keep){ //printf("add a box at %d(%d,%d) with %d\n",bz_idx*max_to_keep+boxId,bz_idx,boxId,box_idx); index[bz_idx*max_to_keep+boxId]=box_idx; } } } __global__ static void _sum_class_boxes(int batchsize,int boxes_num,int class_num,int max_to_keep, const int* mask,int* index,int* int_num_outs){ int box_idx = blockIdx.x * blockDim.x + threadIdx.x; // boxes_num int y = blockIdx.y * blockDim.y + threadIdx.y; // batchsize*class_num int bz_idx=y/class_num; int cls_idx=y%class_num; //index shape[bz,class_num,max_to_keep] if (box_idx<boxes_num && bz_idx<batchsize && cls_idx<class_num){ int mask_idx=bz_idx*class_num*boxes_num+cls_idx*boxes_num+box_idx; if(!mask[mask_idx]){ const int boxId = atomicAdd(&int_num_outs[bz_idx*class_num+cls_idx],1); if (boxId < max_to_keep){ index[bz_idx*class_num*max_to_keep+cls_idx*max_to_keep+boxId]=box_idx; } } } } template<typename T> __global__ static void _clip_num_outs(int batchsize,int max_to_keep,int* int_num_outs,T* num_outs){ int bz_id = blockIdx.x * blockDim.x + threadIdx.x; // batchsize index if (bz_id<batchsize) { if(int_num_outs[bz_id]>max_to_keep) int_num_outs[bz_id]=max_to_keep; num_outs[bz_id]=T(int_num_outs[bz_id]); } } template<typename T> __global__ static void _clip_class_num_outs(int batchsize,int class_num,int max_to_keep,int* int_num_outs,T* num_outs){ int x = blockIdx.x * blockDim.x + threadIdx.x; // batchsize*class_num index int bz_idx=x/class_num; int cls_idx=x%class_num; if (bz_idx<batchsize && cls_idx<class_num) { if(int_num_outs[bz_idx*class_num+cls_idx]>max_to_keep) int_num_outs[bz_idx*class_num+cls_idx]=max_to_keep; num_outs[bz_idx*class_num+cls_idx]=T(int_num_outs[bz_idx*class_num+cls_idx]); } } template<typename T> __global__ static void _gather_boxes(int batchsize,int total_num,int boxes_dim,int max_to_keep, const T* boxes,int* index,T* output_boxes,int* int_num_outs){ int x=blockIdx.x * blockDim.x + threadIdx.x; //batchsize*max_to_keep int type_id=blockIdx.y * blockDim.y + threadIdx.y; int bz_id=x/max_to_keep; int box_index=x%max_to_keep; if(type_id<boxes_dim && bz_id<batchsize && box_index<int_num_outs[bz_id]){ int cur_box_id=bz_id*max_to_keep*boxes_dim+box_index*boxes_dim+type_id; output_boxes[cur_box_id]=boxes[bz_id*total_num*boxes_dim+index[bz_id*max_to_keep+box_index]*boxes_dim+type_id]; //printf("output_box[%d,%d,%d](%d)->%d th box ->%f\n",bz_id,box_index,type_id,cur_box_id,index[bz_id*max_to_keep+box_index],output_boxes[cur_box_id]); } } template<typename T> __global__ static void _gather_class_boxes(int batchsize,int class_num, int total_num,int boxes_dim,int max_to_keep, const T* boxes,const T* 
scores,int* index,T* output_boxes,int* int_num_outs){ int x=blockIdx.x * blockDim.x + threadIdx.x; //batchsize*class_num int y=blockIdx.y * blockDim.y + threadIdx.y; //max_to_keep*(boxes_dim+1) //define the idx int bz_idx=x/class_num; int cls_idx=x%class_num; int box_idx=y/(boxes_dim+1); int type_idx=y%(boxes_dim+1); //in_boxes shape [bz,total_num,class_num,boxes_dim],score shape [bz,total_num,class_num] //index buffer shape[bz,class_num,max_to_keep] //out_boxes shape [bz,class_num,max_to_keep,boxes_dim+1] int numout_idx=bz_idx*class_num+cls_idx; if(type_idx<=boxes_dim && bz_idx<batchsize && cls_idx<class_num && \ box_idx<int_num_outs[numout_idx]){ int cur_box_idx=bz_idx*class_num*max_to_keep*(boxes_dim+1)+cls_idx*max_to_keep*(boxes_dim+1)+box_idx*(boxes_dim+1)+type_idx; int index_val=index[bz_idx*class_num*max_to_keep+cls_idx*max_to_keep+box_idx]; if(type_idx==boxes_dim){ output_boxes[cur_box_idx]=scores[bz_idx*total_num*class_num+index_val*class_num+cls_idx]; }else{ output_boxes[cur_box_idx]=boxes[bz_idx*total_num*class_num*boxes_dim+index_val*class_num*boxes_dim+cls_idx*boxes_dim+type_idx]; } } } template<typename T> void nms_gpu(hipStream_t stream,const T* bboxes,T* output_boxes,T* num_outs, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int bbox_num,int bbox_dim,int max_to_keep,T overlap_thresh,T score_thresh) { //filter with score_thresh dim3 Bl_filter(CU1DBLOCK); dim3 Gr_filter(n_blocks(batchsize*bbox_num,CU1DBLOCK)); hipLaunchKernelGGL(( _filter_bboxes_by_score), dim3(Gr_filter),dim3(Bl_filter),0,stream, batchsize,bbox_num,bbox_dim, bboxes,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*bbox_num,CU2DBLOCK),n_blocks(bbox_num,CU2DBLOCK)); hipLaunchKernelGGL(( _fast_nms), dim3(Gr),dim3(Bl),0,stream, batchsize,bbox_num,bbox_dim, bboxes,mask,overlap_thresh); //sum the boxes CUDA_CHECK(hipMemset(int_num_outs,0,batchsize*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(bbox_num,CU2DBLOCK),n_blocks(batchsize,CU2DBLOCK)); hipLaunchKernelGGL(( _sum_boxes), dim3(Gr_sum),dim3(Bl_sum),0,stream, batchsize,bbox_num,max_to_keep,mask,index_buffer,int_num_outs); dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize,CU1DBLOCK)); hipLaunchKernelGGL(( _clip_num_outs), dim3(Gr_clip),dim3(Bl_clip),0,stream, batchsize,max_to_keep,int_num_outs,num_outs); CUDA_CHECK(hipMemset(output_boxes,0,batchsize*max_to_keep*bbox_dim*sizeof(float))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*max_to_keep,CU2DBLOCK),n_blocks(bbox_dim,CU2DBLOCK)); hipLaunchKernelGGL(( _gather_boxes), dim3(Bl_gather),dim3(Gr_gather),0,stream, batchsize,bbox_num,bbox_dim, max_to_keep,bboxes,index_buffer,output_boxes,int_num_outs); } void nms_gpu(hipStream_t stream,const __half* bboxes,__half* output_boxes, __half* num_outs, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int bbox_num,int bbox_dim,int max_to_keep,__half overlap_thresh,__half score_thresh) { //filter with score_thresh dim3 Bl_filter(CU1DBLOCK); dim3 Gr_filter(n_blocks(batchsize*bbox_num,CU1DBLOCK)); hipLaunchKernelGGL(( _filter_bboxes_by_score), dim3(Gr_filter),dim3(Bl_filter),0,stream, batchsize,bbox_num,bbox_dim, bboxes,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*bbox_num,CU2DBLOCK),n_blocks(bbox_num,CU2DBLOCK)); hipLaunchKernelGGL(( _fast_nmsH), dim3(Gr),dim3(Bl),0,stream, batchsize,bbox_num,bbox_dim, bboxes,mask,overlap_thresh); //sum the boxes CUDA_CHECK(hipMemset(int_num_outs,0,batchsize*sizeof(int))); 
dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(bbox_num,CU2DBLOCK),n_blocks(batchsize,CU2DBLOCK)); hipLaunchKernelGGL(( _sum_boxes), dim3(Gr_sum),dim3(Bl_sum),0,stream, batchsize,bbox_num,max_to_keep,mask,index_buffer,int_num_outs); dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize,CU1DBLOCK)); hipLaunchKernelGGL(( _clip_num_outs), dim3(Gr_clip),dim3(Bl_clip),0,stream, batchsize,max_to_keep,int_num_outs,num_outs); CUDA_CHECK(hipMemset(output_boxes,0,batchsize*max_to_keep*bbox_dim*sizeof(__half))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*max_to_keep,CU2DBLOCK),n_blocks(bbox_dim,CU2DBLOCK)); hipLaunchKernelGGL(( _gather_boxes), dim3(Bl_gather),dim3(Gr_gather),0,stream, batchsize,bbox_num,bbox_dim, max_to_keep,bboxes,index_buffer,output_boxes,int_num_outs); } template<typename T> void multiclass_nms_gpu(hipStream_t stream,const T* in_boxes,const T* in_scores,T* out_boxes,T* out_nums, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int box_num,int class_num,int box_dim,int max_to_keep,T overlap_thresh,T score_thresh) { //in_boxes shape [bz,box_nums,class_num,box_dim],score shape [bz,box_nums,class_num] //std::cout<<"calling multiclass_nms_gpu with batchsize "<<batchsize<<", box_num "<<box_num<<", class_num "<<class_num<<", box_dim "<<box_dim<<", max_to_keep "<<max_to_keep<<", overlap_thresh "<<overlap_thresh<<", score_thresh "<<score_thresh<<std::endl; //filter with score_thresh dim3 Bl_filter(CU2DBLOCK,CU2DBLOCK); dim3 Gr_filter(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(box_num,CU2DBLOCK)); hipLaunchKernelGGL(( _filter_bboxes_by_class_score), dim3(Gr_filter),dim3(Bl_filter),0,stream, batchsize,box_num, class_num,in_scores,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*box_num,CU2DBLOCK),n_blocks(class_num*box_num,CU2DBLOCK)); hipLaunchKernelGGL(( _fast_class_nms), dim3(Gr),dim3(Bl),0,stream, batchsize,box_num,class_num,box_dim, in_boxes,mask,overlap_thresh); //sum the boxes //num_outs shape[bz,class_num] CUDA_CHECK(hipMemset(int_num_outs,0,batchsize*class_num*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(box_num,CU2DBLOCK),n_blocks(batchsize*class_num,CU2DBLOCK)); hipLaunchKernelGGL(( _sum_class_boxes), dim3(Gr_sum),dim3(Bl_sum),0,stream, batchsize,box_num,class_num,max_to_keep,mask,index_buffer,int_num_outs); //clip and assign output nums dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize*class_num,CU1DBLOCK)); hipLaunchKernelGGL(( _clip_class_num_outs), dim3(Gr_clip),dim3(Bl_clip),0,stream, batchsize,class_num,max_to_keep,int_num_outs,out_nums); //out_boxes shape [bz,class_num,max_to_keep,5] CUDA_CHECK(hipMemset(out_boxes,0,batchsize*class_num*max_to_keep*(box_dim+1)*sizeof(T))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(max_to_keep*(box_dim+1),CU2DBLOCK)); hipLaunchKernelGGL(( _gather_class_boxes), dim3(Bl_gather),dim3(Gr_gather),0,stream, batchsize,class_num, box_num,box_dim,max_to_keep,in_boxes,in_scores,index_buffer,out_boxes,int_num_outs); } void multiclass_nms_gpu(hipStream_t stream,const __half* in_boxes, \ const __half* in_scores,__half* out_boxes,__half* out_nums, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int box_num,int class_num,int box_dim,int max_to_keep, \ __half overlap_thresh,__half score_thresh) { //in_boxes shape [bz,box_nums,class_num,box_dim],score shape [bz,box_nums,class_num] //filter with score_thresh dim3 Bl_filter(CU2DBLOCK,CU2DBLOCK); dim3 
Gr_filter(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(box_num,CU2DBLOCK)); hipLaunchKernelGGL(( _filter_bboxes_by_class_score), dim3(Gr_filter),dim3(Bl_filter),0,stream, batchsize,box_num, class_num,in_scores,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*box_num,CU2DBLOCK),n_blocks(class_num*box_num,CU2DBLOCK)); hipLaunchKernelGGL(( _fast_class_nmsH), dim3(Gr),dim3(Bl),0,stream, batchsize,box_num,class_num,box_dim, in_boxes,mask,overlap_thresh); //sum the boxes //num_outs shape[bz,class_num] CUDA_CHECK(hipMemset(int_num_outs,0,batchsize*class_num*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(box_num,CU2DBLOCK),n_blocks(batchsize*class_num,CU2DBLOCK)); hipLaunchKernelGGL(( _sum_class_boxes), dim3(Gr_sum),dim3(Bl_sum),0,stream, batchsize,box_num,class_num,max_to_keep,mask,index_buffer,int_num_outs); //clip and assign output nums dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize*class_num,CU1DBLOCK)); hipLaunchKernelGGL(( _clip_class_num_outs), dim3(Gr_clip),dim3(Bl_clip),0,stream, batchsize,class_num,max_to_keep,int_num_outs,out_nums); //out_boxes shape [bz,class_num,max_to_keep,5] CUDA_CHECK(hipMemset(out_boxes,0,batchsize*class_num*max_to_keep*(box_dim+1)*sizeof(__half))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(max_to_keep*(box_dim+1),CU2DBLOCK)); hipLaunchKernelGGL(( _gather_class_boxes), dim3(Bl_gather),dim3(Gr_gather),0,stream, batchsize,class_num, box_num,box_dim,max_to_keep,in_boxes,in_scores,index_buffer,out_boxes,int_num_outs); } template void nms_gpu<float>(hipStream_t stream,const float* bboxes,float* output_boxes,float* num_outs, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int bbox_num,int bbox_dim,int max_to_keep,float overlap_thresh,float score_thresh); template void multiclass_nms_gpu<float>(hipStream_t stream,const float* in_boxes,const float* in_scores,float* out_boxes,float* out_nums, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int box_num,int class_num,int box_dim,int max_to_keep,float overlap_thresh,float score_thresh); } // namespace ops_lib } // namespace framework } // namespace quake
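A minimal host-side sizing sketch for the multiclass_nms_gpu launcher above, assuming the shapes documented in the kernel comments; the dimension variables, thresholds, and stream are placeholders, and the HostToDevice copies of boxes and scores are omitted. The launcher clears int_num_outs and out_boxes itself but never touches mask, so the sketch zeroes it before the call.

// Sketch only: workspace sizing derived from the index arithmetic in the kernels above.
float *d_in_boxes, *d_in_scores, *d_out_boxes, *d_out_nums;
int *d_int_num_outs, *d_mask, *d_index_buffer;
size_t boxes_bytes  = (size_t)bz * box_num * class_num * box_dim * sizeof(float);           // [bz,box_num,class_num,box_dim]
size_t scores_bytes = (size_t)bz * box_num * class_num * sizeof(float);                     // [bz,box_num,class_num]
size_t out_bytes    = (size_t)bz * class_num * max_to_keep * (box_dim + 1) * sizeof(float); // [bz,class_num,max_to_keep,box_dim+1]
hipMalloc((void**)&d_in_boxes, boxes_bytes);
hipMalloc((void**)&d_in_scores, scores_bytes);
hipMalloc((void**)&d_out_boxes, out_bytes);
hipMalloc((void**)&d_out_nums, (size_t)bz * class_num * sizeof(float));                     // [bz,class_num]
hipMalloc((void**)&d_int_num_outs, (size_t)bz * class_num * sizeof(int));                   // [bz,class_num]
hipMalloc((void**)&d_mask, (size_t)bz * class_num * box_num * sizeof(int));                 // [bz,class_num,box_num]
hipMalloc((void**)&d_index_buffer, (size_t)bz * class_num * max_to_keep * sizeof(int));     // [bz,class_num,max_to_keep]
// The kernels only ever set mask entries to 1, so start with every box marked "alive".
hipMemset(d_mask, 0, (size_t)bz * class_num * box_num * sizeof(int));
multiclass_nms_gpu<float>(stream, d_in_boxes, d_in_scores, d_out_boxes, d_out_nums,
                          d_int_num_outs, d_mask, d_index_buffer,
                          bz, box_num, class_num, box_dim, max_to_keep,
                          overlap_thresh, score_thresh);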
809c8ae22e8ee86c8966c5e6ff6fea2072870f3e.cu
// // Created by chengjin on 2020-06-02. // #include "cu_utils.h" #include "cu_math.h" #include "cu_device.cuh" #include "nms_kernel.h" namespace quake { namespace framework { namespace ops_lib { template<typename T> __device__ inline T devIoU(T const * const a, T const * const b) { T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left + 1, T(0)), height = max(bottom - top + 1, T(0)); T interS = width * height; T Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); T Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); //printf("(float)compare a[%f,%f,%f,%f] with b[%f,%f,%f,%f] --> Sa:%f,Sb:%f --> %f vs %f\n",a[0],a[1],a[2],a[3],b[0],b[1],b[2],b[3],Sa,Sb,interS,Sa + Sb - interS); return interS / (Sa + Sb - interS); } __device__ inline __half devIoUH(__half const * const a, __half const * const b) { __half left = hmax(a[0], b[0]), right = hmin(a[2], b[2]); __half top = hmax(a[1], b[1]), bottom = hmin(a[3], b[3]); __half width = hmax(__hadd(__hsub(right,left),__half(1)),__half(0)); __half height = hmax(__hadd(__hsub(bottom,top),__half(1)),__half(0)); __half width_a=__hadd(__hsub(a[2],a[0]),__half(1)); __half height_a=__hadd(__hsub(a[3],a[1]),__half(1)); __half width_b=__hadd(__hsub(b[2],b[0]),__half(1)); __half height_b=__hadd(__hsub(b[3],b[1]),__half(1)); __half interS = __hmul(width,height); __half Sa = __hmul(width_a,height_a); __half Sb = __hmul(width_b,height_b); __half decrease=__half(1); for(int i=0;i<10;i++){ if(__hge(interS,65535) || __hge(Sa,65535) || __hge(Sb,65535)){ decrease=__hmul(decrease,2); width=__hdiv(width,decrease); height=__hdiv(height,decrease); width_a=__hdiv(width_a,decrease); height_a=__hdiv(height_a,decrease); width_b=__hdiv(width_b,decrease); height_b=__hdiv(height_b,decrease); interS = __hmul(width,height); Sa = __hmul(width_a,height_a); Sb = __hmul(width_b,height_b); }else{ break; } } /* printf("(half)(decrease %f)compare a[%f,%f,%f,%f] with b[%f,%f,%f,%f] --> Sa:%f,Sb:%f --> %f vs %f\n", __half2float(decrease), __half2float(a[0]),__half2float(a[1]),__half2float(a[2]),__half2float(a[3]), __half2float(b[0]),__half2float(b[1]),__half2float(b[2]),__half2float(b[3]), __half2float(Sa),__half2float(Sb), __half2float(interS),__half2float(__hadd(Sa,__hsub(Sb,interS)))); */ return __hdiv(interS,__hadd(Sa,__hsub(Sb,interS))); } template<typename T> __global__ static void _filter_bboxes_by_score(int batchsize,int bbox_num, int bbox_dim,const T* bboxes,int* mask,T score_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; //batchsize*bbox_num int idx_bz=x/bbox_num; int idx_box=x%bbox_num; if (idx_box<bbox_num && idx_bz<batchsize && \ bboxes[idx_bz*bbox_num*bbox_dim+idx_box*bbox_dim+4]<score_thresh) { mask[idx_bz*bbox_num+idx_box]=1; //printf("%d,%d th score %f < %f, set %d\n",idx_bz,idx_box,bboxes[idx_bz*bbox_num*bbox_dim+idx_box*bbox_dim+4],score_thresh,idx_bz*bbox_num+idx_box); } } template<typename T> __global__ static void _filter_bboxes_by_class_score(int batchsize,int bbox_num, int class_num,const T* scores,int* mask,T score_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; //batchsize*class_num int idx_box = blockIdx.y * blockDim.y + threadIdx.y; //bbox_num int idx_bz=x/class_num; int idx_cls=x%class_num; //mask shape [bz,class_num,bbox_num] //score shape [bz,bbox_num,class_num] int score_index=idx_bz*class_num*bbox_num+idx_box*class_num+idx_cls; if (idx_box<bbox_num && idx_bz<batchsize && idx_cls<class_num && \ scores[score_index]<score_thresh) { 
mask[idx_bz*class_num*bbox_num+idx_cls*bbox_num+idx_box]=1; } } template<typename T> __global__ static void _fast_nms(int batchsize,int bbox_num,int bbox_dim, const T* bboxes,int* mask,T overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self int self_id = blockIdx.y * blockDim.y + threadIdx.y; // id of other bboxes int bz_id=x/bbox_num; int compare_id=x%bbox_num; int compare_box=bz_id*bbox_num*bbox_dim+compare_id*bbox_dim; int self_box=bz_id*bbox_num*bbox_dim+self_id*bbox_dim; if(bz_id<batchsize && self_id<bbox_num && mask[bz_id*bbox_num+self_id]==0 && \ compare_id<self_id && mask[bz_id*bbox_num+compare_id]==0){ if (devIoU(&bboxes[compare_box],&bboxes[self_box]) > overlap_thresh){ mask[bz_id*bbox_num+self_id]=1; //printf("(float) %d,%d supressed by %d with float %f \n",bz_id,self_id,compare_id,devIoU(&bboxes[compare_box],&bboxes[self_box])); } } } __global__ static void _fast_nmsH(int batchsize,int bbox_num,int bbox_dim, const __half* bboxes,int* mask,__half overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self int self_id = blockIdx.y * blockDim.y + threadIdx.y; // id of other bboxes int bz_id=x/bbox_num; int compare_id=x%bbox_num; int compare_box=bz_id*bbox_num*bbox_dim+compare_id*bbox_dim; int self_box=bz_id*bbox_num*bbox_dim+self_id*bbox_dim; if(bz_id<batchsize && self_id<bbox_num && mask[bz_id*bbox_num+self_id]==0 && \ compare_id<self_id && mask[bz_id*bbox_num+compare_id]==0){ __half overlap=devIoUH(&bboxes[compare_box],&bboxes[self_box]); if (__hgt(overlap,overlap_thresh)){ mask[bz_id*bbox_num+self_id]=1; //printf("(__half) %d supressed by %d with float %f \n",self_id,compare_id,devIoU(&bboxes[compare_box],&bboxes[self_box])); } } } template<typename T> __global__ static void _fast_class_nms(int batchsize,int bbox_num,int class_num,int bbox_dim, const T* bboxes,int* mask,T overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self, batchsize*bbox_num int y = blockIdx.y * blockDim.y + threadIdx.y; // class_num*bbox_num //define idx int cls_idx=y/bbox_num; int self_idx = y%bbox_num; // id of other bboxes int bz_idx=x/bbox_num; int compare_idx=x%bbox_num; //bboxes shape [bz,bbox_nums,class_num,bbox_dim] //mask shape [bz,class_num,bbox_num] int compare_box=bz_idx*bbox_num*class_num*bbox_dim+compare_idx*class_num*bbox_dim+cls_idx*bbox_dim; int self_box=bz_idx*bbox_num*class_num*bbox_dim+self_idx*class_num*bbox_dim+cls_idx*bbox_dim; int self_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+self_idx; int compare_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+compare_idx; //do nms if(bz_idx<batchsize && cls_idx<class_num && self_idx<bbox_num && \ mask[self_mask_index]==0 && compare_idx<self_idx && mask[compare_mask_index]==0){ if (devIoU(&bboxes[compare_box],&bboxes[self_box]) > overlap_thresh){ mask[self_mask_index]=1; } } } template<typename T> __global__ static void _fast_class_nmsH(int batchsize,int bbox_num,int class_num,int bbox_dim, const T* bboxes,int* mask,T overlap_thresh){ int x = blockIdx.x * blockDim.x + threadIdx.x; // id of self, batchsize*bbox_num int y = blockIdx.y * blockDim.y + threadIdx.y; // class_num*bbox_num //define idx int cls_idx=y/bbox_num; int self_idx = y%bbox_num; // id of other bboxes int bz_idx=x/bbox_num; int compare_idx=x%bbox_num; //bboxes shape [bz,bbox_nums,class_num,bbox_dim] //mask shape [bz,class_num,bbox_num] int compare_box=bz_idx*bbox_num*class_num*bbox_dim+compare_idx*class_num*bbox_dim+cls_idx*bbox_dim; int 
self_box=bz_idx*bbox_num*class_num*bbox_dim+self_idx*class_num*bbox_dim+cls_idx*bbox_dim; int self_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+self_idx; int compare_mask_index=bz_idx*class_num*bbox_num+cls_idx*bbox_num+compare_idx; //do nms if(bz_idx<batchsize && cls_idx<class_num && self_idx<bbox_num && \ mask[self_mask_index]==0 && compare_idx<self_idx && mask[compare_mask_index]==0){ __half overlap=devIoUH(&bboxes[compare_box],&bboxes[self_box]); if (__hgt(overlap,overlap_thresh)){ mask[self_mask_index]=1; } } } __global__ static void _sum_boxes(int batchsize,int boxes_num,int max_to_keep, const int* mask,int* index,int* int_num_outs){ int box_idx = blockIdx.x * blockDim.x + threadIdx.x; // boxes_num int bz_idx = blockIdx.y * blockDim.y + threadIdx.y; // batchsize if (box_idx<boxes_num && bz_idx<batchsize && mask[bz_idx*boxes_num+box_idx]==0) { const int boxId = atomicAdd(&int_num_outs[bz_idx],1); if (boxId < max_to_keep){ //printf("add a box at %d(%d,%d) with %d\n",bz_idx*max_to_keep+boxId,bz_idx,boxId,box_idx); index[bz_idx*max_to_keep+boxId]=box_idx; } } } __global__ static void _sum_class_boxes(int batchsize,int boxes_num,int class_num,int max_to_keep, const int* mask,int* index,int* int_num_outs){ int box_idx = blockIdx.x * blockDim.x + threadIdx.x; // boxes_num int y = blockIdx.y * blockDim.y + threadIdx.y; // batchsize*class_num int bz_idx=y/class_num; int cls_idx=y%class_num; //index shape[bz,class_num,max_to_keep] if (box_idx<boxes_num && bz_idx<batchsize && cls_idx<class_num){ int mask_idx=bz_idx*class_num*boxes_num+cls_idx*boxes_num+box_idx; if(!mask[mask_idx]){ const int boxId = atomicAdd(&int_num_outs[bz_idx*class_num+cls_idx],1); if (boxId < max_to_keep){ index[bz_idx*class_num*max_to_keep+cls_idx*max_to_keep+boxId]=box_idx; } } } } template<typename T> __global__ static void _clip_num_outs(int batchsize,int max_to_keep,int* int_num_outs,T* num_outs){ int bz_id = blockIdx.x * blockDim.x + threadIdx.x; // batchsize index if (bz_id<batchsize) { if(int_num_outs[bz_id]>max_to_keep) int_num_outs[bz_id]=max_to_keep; num_outs[bz_id]=T(int_num_outs[bz_id]); } } template<typename T> __global__ static void _clip_class_num_outs(int batchsize,int class_num,int max_to_keep,int* int_num_outs,T* num_outs){ int x = blockIdx.x * blockDim.x + threadIdx.x; // batchsize*class_num index int bz_idx=x/class_num; int cls_idx=x%class_num; if (bz_idx<batchsize && cls_idx<class_num) { if(int_num_outs[bz_idx*class_num+cls_idx]>max_to_keep) int_num_outs[bz_idx*class_num+cls_idx]=max_to_keep; num_outs[bz_idx*class_num+cls_idx]=T(int_num_outs[bz_idx*class_num+cls_idx]); } } template<typename T> __global__ static void _gather_boxes(int batchsize,int total_num,int boxes_dim,int max_to_keep, const T* boxes,int* index,T* output_boxes,int* int_num_outs){ int x=blockIdx.x * blockDim.x + threadIdx.x; //batchsize*max_to_keep int type_id=blockIdx.y * blockDim.y + threadIdx.y; int bz_id=x/max_to_keep; int box_index=x%max_to_keep; if(type_id<boxes_dim && bz_id<batchsize && box_index<int_num_outs[bz_id]){ int cur_box_id=bz_id*max_to_keep*boxes_dim+box_index*boxes_dim+type_id; output_boxes[cur_box_id]=boxes[bz_id*total_num*boxes_dim+index[bz_id*max_to_keep+box_index]*boxes_dim+type_id]; //printf("output_box[%d,%d,%d](%d)->%d th box ->%f\n",bz_id,box_index,type_id,cur_box_id,index[bz_id*max_to_keep+box_index],output_boxes[cur_box_id]); } } template<typename T> __global__ static void _gather_class_boxes(int batchsize,int class_num, int total_num,int boxes_dim,int max_to_keep, const T* boxes,const T* 
scores,int* index,T* output_boxes,int* int_num_outs){ int x=blockIdx.x * blockDim.x + threadIdx.x; //batchsize*class_num int y=blockIdx.y * blockDim.y + threadIdx.y; //max_to_keep*(boxes_dim+1) //define the idx int bz_idx=x/class_num; int cls_idx=x%class_num; int box_idx=y/(boxes_dim+1); int type_idx=y%(boxes_dim+1); //in_boxes shape [bz,total_num,class_num,boxes_dim],score shape [bz,total_num,class_num] //index buffer shape[bz,class_num,max_to_keep] //out_boxes shape [bz,class_num,max_to_keep,boxes_dim+1] int numout_idx=bz_idx*class_num+cls_idx; if(type_idx<=boxes_dim && bz_idx<batchsize && cls_idx<class_num && \ box_idx<int_num_outs[numout_idx]){ int cur_box_idx=bz_idx*class_num*max_to_keep*(boxes_dim+1)+cls_idx*max_to_keep*(boxes_dim+1)+box_idx*(boxes_dim+1)+type_idx; int index_val=index[bz_idx*class_num*max_to_keep+cls_idx*max_to_keep+box_idx]; if(type_idx==boxes_dim){ output_boxes[cur_box_idx]=scores[bz_idx*total_num*class_num+index_val*class_num+cls_idx]; }else{ output_boxes[cur_box_idx]=boxes[bz_idx*total_num*class_num*boxes_dim+index_val*class_num*boxes_dim+cls_idx*boxes_dim+type_idx]; } } } template<typename T> void nms_gpu(cudaStream_t stream,const T* bboxes,T* output_boxes,T* num_outs, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int bbox_num,int bbox_dim,int max_to_keep,T overlap_thresh,T score_thresh) { //filter with score_thresh dim3 Bl_filter(CU1DBLOCK); dim3 Gr_filter(n_blocks(batchsize*bbox_num,CU1DBLOCK)); _filter_bboxes_by_score<<<Gr_filter,Bl_filter,0,stream>>>(batchsize,bbox_num,bbox_dim, bboxes,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*bbox_num,CU2DBLOCK),n_blocks(bbox_num,CU2DBLOCK)); _fast_nms<<<Gr,Bl,0,stream>>>(batchsize,bbox_num,bbox_dim, bboxes,mask,overlap_thresh); //sum the boxes CUDA_CHECK(cudaMemset(int_num_outs,0,batchsize*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(bbox_num,CU2DBLOCK),n_blocks(batchsize,CU2DBLOCK)); _sum_boxes<<<Gr_sum,Bl_sum,0,stream>>>(batchsize,bbox_num,max_to_keep,mask,index_buffer,int_num_outs); dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize,CU1DBLOCK)); _clip_num_outs<<<Gr_clip,Bl_clip,0,stream>>>(batchsize,max_to_keep,int_num_outs,num_outs); CUDA_CHECK(cudaMemset(output_boxes,0,batchsize*max_to_keep*bbox_dim*sizeof(float))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*max_to_keep,CU2DBLOCK),n_blocks(bbox_dim,CU2DBLOCK)); _gather_boxes<<<Bl_gather,Gr_gather,0,stream>>>(batchsize,bbox_num,bbox_dim, max_to_keep,bboxes,index_buffer,output_boxes,int_num_outs); } void nms_gpu(cudaStream_t stream,const __half* bboxes,__half* output_boxes, __half* num_outs, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int bbox_num,int bbox_dim,int max_to_keep,__half overlap_thresh,__half score_thresh) { //filter with score_thresh dim3 Bl_filter(CU1DBLOCK); dim3 Gr_filter(n_blocks(batchsize*bbox_num,CU1DBLOCK)); _filter_bboxes_by_score<<<Gr_filter,Bl_filter,0,stream>>>(batchsize,bbox_num,bbox_dim, bboxes,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*bbox_num,CU2DBLOCK),n_blocks(bbox_num,CU2DBLOCK)); _fast_nmsH<<<Gr,Bl,0,stream>>>(batchsize,bbox_num,bbox_dim, bboxes,mask,overlap_thresh); //sum the boxes CUDA_CHECK(cudaMemset(int_num_outs,0,batchsize*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(bbox_num,CU2DBLOCK),n_blocks(batchsize,CU2DBLOCK)); _sum_boxes<<<Gr_sum,Bl_sum,0,stream>>>(batchsize,bbox_num,max_to_keep,mask,index_buffer,int_num_outs); 
dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize,CU1DBLOCK)); _clip_num_outs<<<Gr_clip,Bl_clip,0,stream>>>(batchsize,max_to_keep,int_num_outs,num_outs); CUDA_CHECK(cudaMemset(output_boxes,0,batchsize*max_to_keep*bbox_dim*sizeof(__half))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*max_to_keep,CU2DBLOCK),n_blocks(bbox_dim,CU2DBLOCK)); _gather_boxes<<<Bl_gather,Gr_gather,0,stream>>>(batchsize,bbox_num,bbox_dim, max_to_keep,bboxes,index_buffer,output_boxes,int_num_outs); } template<typename T> void multiclass_nms_gpu(cudaStream_t stream,const T* in_boxes,const T* in_scores,T* out_boxes,T* out_nums, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int box_num,int class_num,int box_dim,int max_to_keep,T overlap_thresh,T score_thresh) { //in_boxes shape [bz,box_nums,class_num,box_dim],score shape [bz,box_nums,class_num] //std::cout<<"calling multiclass_nms_gpu with batchsize "<<batchsize<<", box_num "<<box_num<<", class_num "<<class_num<<", box_dim "<<box_dim<<", max_to_keep "<<max_to_keep<<", overlap_thresh "<<overlap_thresh<<", score_thresh "<<score_thresh<<std::endl; //filter with score_thresh dim3 Bl_filter(CU2DBLOCK,CU2DBLOCK); dim3 Gr_filter(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(box_num,CU2DBLOCK)); _filter_bboxes_by_class_score<<<Gr_filter,Bl_filter,0,stream>>>(batchsize,box_num, class_num,in_scores,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*box_num,CU2DBLOCK),n_blocks(class_num*box_num,CU2DBLOCK)); _fast_class_nms<<<Gr,Bl,0,stream>>>(batchsize,box_num,class_num,box_dim, in_boxes,mask,overlap_thresh); //sum the boxes //num_outs shape[bz,class_num] CUDA_CHECK(cudaMemset(int_num_outs,0,batchsize*class_num*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(box_num,CU2DBLOCK),n_blocks(batchsize*class_num,CU2DBLOCK)); _sum_class_boxes<<<Gr_sum,Bl_sum,0,stream>>>(batchsize,box_num,class_num,max_to_keep,mask,index_buffer,int_num_outs); //clip and assign output nums dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize*class_num,CU1DBLOCK)); _clip_class_num_outs<<<Gr_clip,Bl_clip,0,stream>>>(batchsize,class_num,max_to_keep,int_num_outs,out_nums); //out_boxes shape [bz,class_num,max_to_keep,5] CUDA_CHECK(cudaMemset(out_boxes,0,batchsize*class_num*max_to_keep*(box_dim+1)*sizeof(T))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(max_to_keep*(box_dim+1),CU2DBLOCK)); _gather_class_boxes<<<Bl_gather,Gr_gather,0,stream>>>(batchsize,class_num, box_num,box_dim,max_to_keep,in_boxes,in_scores,index_buffer,out_boxes,int_num_outs); } void multiclass_nms_gpu(cudaStream_t stream,const __half* in_boxes, \ const __half* in_scores,__half* out_boxes,__half* out_nums, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int box_num,int class_num,int box_dim,int max_to_keep, \ __half overlap_thresh,__half score_thresh) { //in_boxes shape [bz,box_nums,class_num,box_dim],score shape [bz,box_nums,class_num] //filter with score_thresh dim3 Bl_filter(CU2DBLOCK,CU2DBLOCK); dim3 Gr_filter(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(box_num,CU2DBLOCK)); _filter_bboxes_by_class_score<<<Gr_filter,Bl_filter,0,stream>>>(batchsize,box_num, class_num,in_scores,mask,score_thresh); //set the mask dim3 Bl(CU2DBLOCK,CU2DBLOCK); dim3 Gr(n_blocks(batchsize*box_num,CU2DBLOCK),n_blocks(class_num*box_num,CU2DBLOCK)); _fast_class_nmsH<<<Gr,Bl,0,stream>>>(batchsize,box_num,class_num,box_dim, in_boxes,mask,overlap_thresh); //sum the boxes 
//num_outs shape[bz,class_num] CUDA_CHECK(cudaMemset(int_num_outs,0,batchsize*class_num*sizeof(int))); dim3 Bl_sum(CU2DBLOCK,CU2DBLOCK); dim3 Gr_sum(n_blocks(box_num,CU2DBLOCK),n_blocks(batchsize*class_num,CU2DBLOCK)); _sum_class_boxes<<<Gr_sum,Bl_sum,0,stream>>>(batchsize,box_num,class_num,max_to_keep,mask,index_buffer,int_num_outs); //clip and assign output nums dim3 Bl_clip(CU1DBLOCK); dim3 Gr_clip(n_blocks(batchsize*class_num,CU1DBLOCK)); _clip_class_num_outs<<<Gr_clip,Bl_clip,0,stream>>>(batchsize,class_num,max_to_keep,int_num_outs,out_nums); //out_boxes shape [bz,class_num,max_to_keep,5] CUDA_CHECK(cudaMemset(out_boxes,0,batchsize*class_num*max_to_keep*(box_dim+1)*sizeof(__half))); dim3 Bl_gather(CU2DBLOCK,CU2DBLOCK); dim3 Gr_gather(n_blocks(batchsize*class_num,CU2DBLOCK),n_blocks(max_to_keep*(box_dim+1),CU2DBLOCK)); _gather_class_boxes<<<Bl_gather,Gr_gather,0,stream>>>(batchsize,class_num, box_num,box_dim,max_to_keep,in_boxes,in_scores,index_buffer,out_boxes,int_num_outs); } template void nms_gpu<float>(cudaStream_t stream,const float* bboxes,float* output_boxes,float* num_outs, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int bbox_num,int bbox_dim,int max_to_keep,float overlap_thresh,float score_thresh); template void multiclass_nms_gpu<float>(cudaStream_t stream,const float* in_boxes,const float* in_scores,float* out_boxes,float* out_nums, int* int_num_outs,int* mask,int* index_buffer, int batchsize,int box_num,int class_num,int box_dim,int max_to_keep,float overlap_thresh,float score_thresh); } // namespace ops_lib } // namespace framework } // namespace quake
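For reference, a sequential CPU sketch of the mask that _filter_bboxes_by_score plus _fast_nms compute for a single batch element, assuming rows of [x1, y1, x2, y2, score] (bbox_dim of at least 5, score at index 4) sorted by descending score so that lower-indexed boxes suppress higher-indexed ones. Because the kernel reads mask entries that other threads may still be writing, its result approximates this greedy pass and can differ slightly.

#include <algorithm>
#include <vector>

// IoU with the same +1 pixel convention as devIoU above.
static float iou_ref(const float* a, const float* b) {
  float left = std::max(a[0], b[0]), right  = std::min(a[2], b[2]);
  float top  = std::max(a[1], b[1]), bottom = std::min(a[3], b[3]);
  float w = std::max(right - left + 1.0f, 0.0f);
  float h = std::max(bottom - top + 1.0f, 0.0f);
  float inter = w * h;
  float sa = (a[2] - a[0] + 1.0f) * (a[3] - a[1] + 1.0f);
  float sb = (b[2] - b[0] + 1.0f) * (b[3] - b[1] + 1.0f);
  return inter / (sa + sb - inter);
}

// mask[i] == 1 means box i is filtered out or suppressed, matching the GPU convention.
std::vector<int> nms_mask_ref(const float* boxes, int n, int dim,
                              float overlap_thresh, float score_thresh) {
  std::vector<int> mask(n, 0);
  for (int i = 0; i < n; ++i)
    if (boxes[i * dim + 4] < score_thresh) mask[i] = 1;      // score filter
  for (int i = 0; i < n; ++i) {
    if (mask[i]) continue;
    for (int j = 0; j < i; ++j)                              // earlier boxes suppress later ones
      if (!mask[j] && iou_ref(&boxes[j * dim], &boxes[i * dim]) > overlap_thresh) {
        mask[i] = 1;
        break;
      }
  }
  return mask;
}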
d4aa2104467668a4eae1586d509ff0d73f3df6dd.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <algorithm> #include <iterator> #include <random> #include <sstream> #include <vector> #include "fastertransformer/cuda/hipcub/hipcub.hpp" #include "fusion_decoding_op.h" #include "pd_traits.h" template <paddle::DataType D> std::vector<paddle::Tensor> decoding_kernel( const paddle::Tensor& input, const paddle::Tensor& memory_sequence_length, const paddle::Tensor& word_emb, const std::vector<paddle::Tensor>& self_layernorm_weight, const std::vector<paddle::Tensor>& self_layernorm_bias, const std::vector<paddle::Tensor>& self_attn_query_weight, const std::vector<paddle::Tensor>& self_attn_query_bias, const std::vector<paddle::Tensor>& self_attn_key_weight, const std::vector<paddle::Tensor>& self_attn_key_bias, const std::vector<paddle::Tensor>& self_attn_value_weight, const std::vector<paddle::Tensor>& self_attn_value_bias, const std::vector<paddle::Tensor>& self_attn_output_weight, const std::vector<paddle::Tensor>& self_attn_output_bias, const std::vector<paddle::Tensor>& cross_layernorm_weight, const std::vector<paddle::Tensor>& cross_layernorm_bias, const std::vector<paddle::Tensor>& cross_attn_query_weight, const std::vector<paddle::Tensor>& cross_attn_query_bias, const std::vector<paddle::Tensor>& cross_attn_key_weight, const std::vector<paddle::Tensor>& cross_attn_key_bias, const std::vector<paddle::Tensor>& cross_attn_value_weight, const std::vector<paddle::Tensor>& cross_attn_value_bias, const std::vector<paddle::Tensor>& cross_attn_output_weight, const std::vector<paddle::Tensor>& cross_attn_output_bias, const std::vector<paddle::Tensor>& ffn_layernorm_weight, const std::vector<paddle::Tensor>& ffn_layernorm_bias, const std::vector<paddle::Tensor>& ffn_intermediate_weight, const std::vector<paddle::Tensor>& ffn_intermediate_bias, const std::vector<paddle::Tensor>& ffn_output_weight, const std::vector<paddle::Tensor>& ffn_output_bias, const paddle::Tensor& decoder_layernorm_weight, const paddle::Tensor& decoder_layernorm_bias, const paddle::Tensor& embedding_weight, const paddle::Tensor& embedding_bias, const paddle::Tensor& position_encoding_table, paddle::Tensor& output_ids, paddle::Tensor& parent_ids, paddle::Tensor& sequence_length, std::string decoding_strategy, int beam_size, int topk, float topp, int head_num_, int size_per_head_, int num_layer_, int start_id_, int end_id_, int64_t max_seq_len_, float beam_search_diversity_rate_, float alpha, hipblasHandle_t cublas_handle_, hipStream_t stream) { int beam_width_ = (decoding_strategy == "beam_search" || decoding_strategy == "beam_search_v2") ? beam_size : 1; int candidate_num_ = (decoding_strategy == "topk_sampling" || decoding_strategy == "topp_sampling") ? 
topk : 1; float probability_threshold_ = (decoding_strategy == "topk_sampling" || decoding_strategy == "topp_sampling") ? topp : 0.0; auto input_dims = input.shape(); int batch_size_ = (decoding_strategy == "beam_search" || decoding_strategy == "beam_search_v2") ? input_dims[0] / beam_width_ : input_dims[0]; const int memory_max_seq_len = input_dims[1]; const int memory_hidden_dim = input_dims[2]; const int vocab_size = word_emb.shape()[0]; typedef PDTraits<D> traits_; typedef typename traits_::DataType DataType_; typedef typename traits_::data_t data_t_; DecodingInitParam<DataType_> decoding_params; decoding_params.cublas_handle = cublas_handle_; decoding_params.output_ids = output_ids.mutable_data<int>(input.place()); decoding_params.parent_ids = parent_ids.mutable_data<int>(input.place()); decoding_params.sequence_length = sequence_length.mutable_data<int>(input.place()); typedef DecoderTransformerTraits<traits_::OpType> DecodingTraits_; decoding_params.stream = stream; fastertransformer::Allocator<AllocatorType::PD> allocator_(stream); decoding_params.memory_tensor = reinterpret_cast<const DataType_*>(input.data<data_t_>()); decoding_params.memory_sequence_length = memory_sequence_length.data<int>(); DecoderInitParam<DataType_>* params = new DecoderInitParam<DataType_>[num_layer_]; for (int i = 0; i < num_layer_; i++) { params[i].stream = stream; params[i].cublas_handle = cublas_handle_; // self attn params[i].self_layernorm.gamma = reinterpret_cast<const DataType_*>( self_layernorm_weight[i].data<data_t_>()); params[i].self_layernorm.beta = reinterpret_cast<const DataType_*>( self_layernorm_bias[i].data<data_t_>()); // query params[i].self_attention.query_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_query_weight[i].data<data_t_>()); params[i].self_attention.query_weight.bias = reinterpret_cast<const DataType_*>( self_attn_query_bias[i].data<data_t_>()); // key params[i].self_attention.key_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_key_weight[i].data<data_t_>()); params[i].self_attention.key_weight.bias = reinterpret_cast<const DataType_*>( self_attn_key_bias[i].data<data_t_>()); // value params[i].self_attention.value_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_value_weight[i].data<data_t_>()); params[i].self_attention.value_weight.bias = reinterpret_cast<const DataType_*>( self_attn_value_bias[i].data<data_t_>()); // out proj params[i].self_attention.attention_output_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_output_weight[i].data<data_t_>()); params[i].self_attention.attention_output_weight.bias = reinterpret_cast<const DataType_*>( self_attn_output_bias[i].data<data_t_>()); // cross params[i].cross_layernorm.gamma = reinterpret_cast<const DataType_*>( cross_layernorm_weight[i].data<data_t_>()); params[i].cross_layernorm.beta = reinterpret_cast<const DataType_*>( cross_layernorm_bias[i].data<data_t_>()); // query params[i].cross_attention.query_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_query_weight[i].data<data_t_>()); params[i].cross_attention.query_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_query_bias[i].data<data_t_>()); // key params[i].cross_attention.key_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_key_weight[i].data<data_t_>()); params[i].cross_attention.key_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_key_bias[i].data<data_t_>()); // value params[i].cross_attention.value_weight.kernel = reinterpret_cast<const DataType_*>( 
cross_attn_value_weight[i].data<data_t_>()); params[i].cross_attention.value_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_value_bias[i].data<data_t_>()); // out proj params[i].cross_attention.attention_output_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_output_weight[i].data<data_t_>()); params[i].cross_attention.attention_output_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_output_bias[i].data<data_t_>()); // ffn params[i].ffn_layernorm.gamma = reinterpret_cast<const DataType_*>( ffn_layernorm_weight[i].data<data_t_>()); params[i].ffn_layernorm.beta = reinterpret_cast<const DataType_*>( ffn_layernorm_bias[i].data<data_t_>()); // intermediate proj params[i].ffn.intermediate_weight.kernel = reinterpret_cast<const DataType_*>( ffn_intermediate_weight[i].data<data_t_>()); params[i].ffn.intermediate_weight.bias = reinterpret_cast<const DataType_*>( ffn_intermediate_bias[i].data<data_t_>()); // out proj params[i].ffn.output_weight.kernel = reinterpret_cast<const DataType_*>( ffn_output_weight[i].data<data_t_>()); params[i].ffn.output_weight.bias = reinterpret_cast<const DataType_*>(ffn_output_bias[i].data<data_t_>()); } decoding_params.layernorm.gamma = reinterpret_cast<const DataType_*>( decoder_layernorm_weight.data<data_t_>()); decoding_params.layernorm.beta = reinterpret_cast<const DataType_*>( decoder_layernorm_bias.data<data_t_>()); // for embedding decoding_params.embedding_table = reinterpret_cast<const DataType_*>(word_emb.data<data_t_>()); // for weight sharing matmul decoding_params.embedding_kernel = reinterpret_cast<const DataType_*>(embedding_weight.data<data_t_>()); // NOTE: the data type of the embedding bias for logits is different // between decoding with beam search and top-k/top-p sampling in // FasterTransformer when using float16. 
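// In caller terms: the beam-search branch below reads the bias through
// embedding_bias.data<float>(), so that tensor must stay float32 even when
// DataType_ is half, while the sampling branch reinterprets it as DataType_
// and therefore expects it in the same dtype as the rest of the weights.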
if ("beam_search" == decoding_strategy || "beam_search_v2" == decoding_strategy) { // for matmul bias decoding_params.embedding_bias = reinterpret_cast<const float*>(embedding_bias.data<float>()); } else if ("topk_sampling" == decoding_strategy || "topp_sampling" == decoding_strategy) { decoding_params.embedding_bias_T = reinterpret_cast<const DataType_*>(embedding_bias.data<data_t_>()); } decoding_params.position_encoding_table = reinterpret_cast<const DataType_*>( position_encoding_table.data<data_t_>()); if ("beam_search" == decoding_strategy) { DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beam_search_; decoding_beam_search_ = new DecodingBeamsearch<DecodingTraits_::OpType>( allocator_, batch_size_, beam_width_, max_seq_len_, head_num_, size_per_head_, vocab_size, num_layer_, memory_hidden_dim, memory_max_seq_len, start_id_, end_id_, beam_search_diversity_rate_); decoding_beam_search_->forward(params, decoding_params); delete decoding_beam_search_; } else if ("beam_search_v2" == decoding_strategy) { DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beam_search_; decoding_beam_search_ = new DecodingBeamsearch<DecodingTraits_::OpType>( allocator_, batch_size_, beam_width_, max_seq_len_, head_num_, size_per_head_, vocab_size, num_layer_, memory_hidden_dim, memory_max_seq_len, start_id_, end_id_, beam_search_diversity_rate_, true, // is_fuse_topk_softMax_ true, // keep_alive_beam_ alpha); decoding_beam_search_->forward(params, decoding_params); delete decoding_beam_search_; } else if ("topk_sampling" == decoding_strategy || "topp_sampling" == decoding_strategy) { DecodingSampling<DecodingTraits_::OpType>* decoding_sampling_; decoding_sampling_ = new DecodingSampling<DecodingTraits_::OpType>(allocator_, batch_size_, max_seq_len_, head_num_, size_per_head_, vocab_size, num_layer_, memory_hidden_dim, memory_max_seq_len, start_id_, end_id_, candidate_num_, probability_threshold_); decoding_sampling_->forward(params, decoding_params); delete decoding_sampling_; } else { PD_THROW( "Only beam_search, topk_sampling and topp_sampling are supported for " "FasterTransformer. 
"); } delete[] params; return {output_ids, parent_ids, sequence_length}; } std::vector<paddle::Tensor> DecodingCUDAForward( const paddle::Tensor& input, const paddle::Tensor& mem_seq_len, const paddle::Tensor& word_embedding, const std::vector<paddle::Tensor>& self_ln_weight, const std::vector<paddle::Tensor>& self_ln_bias, const std::vector<paddle::Tensor>& self_q_weight, const std::vector<paddle::Tensor>& self_q_bias, const std::vector<paddle::Tensor>& self_k_weight, const std::vector<paddle::Tensor>& self_k_bias, const std::vector<paddle::Tensor>& self_v_weight, const std::vector<paddle::Tensor>& self_v_bias, const std::vector<paddle::Tensor>& self_out_weight, const std::vector<paddle::Tensor>& self_out_bias, const std::vector<paddle::Tensor>& cross_ln_weight, const std::vector<paddle::Tensor>& cross_ln_bias, const std::vector<paddle::Tensor>& cross_q_weight, const std::vector<paddle::Tensor>& cross_q_bias, const std::vector<paddle::Tensor>& cross_k_weight, const std::vector<paddle::Tensor>& cross_k_bias, const std::vector<paddle::Tensor>& cross_v_weight, const std::vector<paddle::Tensor>& cross_v_bias, const std::vector<paddle::Tensor>& cross_out_weight, const std::vector<paddle::Tensor>& cross_out_bias, const std::vector<paddle::Tensor>& ffn_ln_weight, const std::vector<paddle::Tensor>& ffn_ln_bias, const std::vector<paddle::Tensor>& ffn_inter_weight, const std::vector<paddle::Tensor>& ffn_inter_bias, const std::vector<paddle::Tensor>& ffn_out_weight, const std::vector<paddle::Tensor>& ffn_out_bias, const paddle::Tensor& decoder_ln_weight, const paddle::Tensor& decoder_ln_bias, const paddle::Tensor& embedding_weight, const paddle::Tensor& embedding_bias, const paddle::Tensor& positional_embedding_weight, paddle::Tensor& output_ids, paddle::Tensor& parent_ids, paddle::Tensor& sequence_length, std::string decoding_strategy, int beam_size, int topk, float topp, int n_head, int size_per_head, int num_layer, int bos_id, int eos_id, int64_t max_len, float beam_search_diversity_rate, float alpha) { auto stream = input.stream(); hipblasHandle_t cublas_handle_; hipblasCreate(&cublas_handle_); hipblasSetStream(cublas_handle_, stream); std::vector<paddle::Tensor> ret; switch (input.type()) { case paddle::DataType::FLOAT16: { ret = decoding_kernel<paddle::DataType::FLOAT16>( input, mem_seq_len, word_embedding, self_ln_weight, self_ln_bias, self_q_weight, self_q_bias, self_k_weight, self_k_bias, self_v_weight, self_v_bias, self_out_weight, self_out_bias, cross_ln_weight, cross_ln_bias, cross_q_weight, cross_q_bias, cross_k_weight, cross_k_bias, cross_v_weight, cross_v_bias, cross_out_weight, cross_out_bias, ffn_ln_weight, ffn_ln_bias, ffn_inter_weight, ffn_inter_bias, ffn_out_weight, ffn_out_bias, decoder_ln_weight, decoder_ln_bias, embedding_weight, embedding_bias, positional_embedding_weight, output_ids, parent_ids, sequence_length, decoding_strategy, beam_size, topk, topp, n_head, size_per_head, num_layer, bos_id, eos_id, max_len, beam_search_diversity_rate, alpha, cublas_handle_, stream); break; } case paddle::DataType::FLOAT32: { ret = decoding_kernel<paddle::DataType::FLOAT32>( input, mem_seq_len, word_embedding, self_ln_weight, self_ln_bias, self_q_weight, self_q_bias, self_k_weight, self_k_bias, self_v_weight, self_v_bias, self_out_weight, self_out_bias, cross_ln_weight, cross_ln_bias, cross_q_weight, cross_q_bias, cross_k_weight, cross_k_bias, cross_v_weight, cross_v_bias, cross_out_weight, cross_out_bias, ffn_ln_weight, ffn_ln_bias, ffn_inter_weight, ffn_inter_bias, ffn_out_weight, 
ffn_out_bias, decoder_ln_weight, decoder_ln_bias, embedding_weight, embedding_bias, positional_embedding_weight, output_ids, parent_ids, sequence_length, decoding_strategy, beam_size, topk, topp, n_head, size_per_head, num_layer, bos_id, eos_id, max_len, beam_search_diversity_rate, alpha, cublas_handle_, stream); break; } default: { PD_THROW( "NOT supported data type. " "Only float16 and float32 are supported. "); break; } } hipblasDestroy(cublas_handle_); return ret; }
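The strategy-dependent parameters computed at the top of decoding_kernel can be summarised in a small helper; the following is an illustrative restatement of that same mapping, with hypothetical names, not part of the original op.

#include <string>

// Mirrors the beam_width_/candidate_num_/probability_threshold_/batch_size_ logic above.
struct DecodingConfig {
  int beam_width;        // > 1 only for the beam-search strategies
  int candidate_num;     // top-k cutoff, used only by the sampling strategies
  float prob_threshold;  // top-p cutoff, used only by the sampling strategies
  int batch_size;        // input dim 0 is batch*beam for beam search
};

inline DecodingConfig MakeDecodingConfig(const std::string& strategy, int beam_size,
                                         int topk, float topp, int input_batch_dim) {
  const bool is_beam = (strategy == "beam_search" || strategy == "beam_search_v2");
  const bool is_sampling = (strategy == "topk_sampling" || strategy == "topp_sampling");
  DecodingConfig cfg;
  cfg.beam_width = is_beam ? beam_size : 1;
  cfg.candidate_num = is_sampling ? topk : 1;
  cfg.prob_threshold = is_sampling ? topp : 0.0f;
  cfg.batch_size = is_beam ? input_batch_dim / cfg.beam_width : input_batch_dim;
  return cfg;
}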
d4aa2104467668a4eae1586d509ff0d73f3df6dd.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda.h> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> #include <algorithm> #include <iterator> #include <random> #include <sstream> #include <vector> #include "fastertransformer/cuda/cub/cub.cuh" #include "fusion_decoding_op.h" #include "pd_traits.h" template <paddle::DataType D> std::vector<paddle::Tensor> decoding_kernel( const paddle::Tensor& input, const paddle::Tensor& memory_sequence_length, const paddle::Tensor& word_emb, const std::vector<paddle::Tensor>& self_layernorm_weight, const std::vector<paddle::Tensor>& self_layernorm_bias, const std::vector<paddle::Tensor>& self_attn_query_weight, const std::vector<paddle::Tensor>& self_attn_query_bias, const std::vector<paddle::Tensor>& self_attn_key_weight, const std::vector<paddle::Tensor>& self_attn_key_bias, const std::vector<paddle::Tensor>& self_attn_value_weight, const std::vector<paddle::Tensor>& self_attn_value_bias, const std::vector<paddle::Tensor>& self_attn_output_weight, const std::vector<paddle::Tensor>& self_attn_output_bias, const std::vector<paddle::Tensor>& cross_layernorm_weight, const std::vector<paddle::Tensor>& cross_layernorm_bias, const std::vector<paddle::Tensor>& cross_attn_query_weight, const std::vector<paddle::Tensor>& cross_attn_query_bias, const std::vector<paddle::Tensor>& cross_attn_key_weight, const std::vector<paddle::Tensor>& cross_attn_key_bias, const std::vector<paddle::Tensor>& cross_attn_value_weight, const std::vector<paddle::Tensor>& cross_attn_value_bias, const std::vector<paddle::Tensor>& cross_attn_output_weight, const std::vector<paddle::Tensor>& cross_attn_output_bias, const std::vector<paddle::Tensor>& ffn_layernorm_weight, const std::vector<paddle::Tensor>& ffn_layernorm_bias, const std::vector<paddle::Tensor>& ffn_intermediate_weight, const std::vector<paddle::Tensor>& ffn_intermediate_bias, const std::vector<paddle::Tensor>& ffn_output_weight, const std::vector<paddle::Tensor>& ffn_output_bias, const paddle::Tensor& decoder_layernorm_weight, const paddle::Tensor& decoder_layernorm_bias, const paddle::Tensor& embedding_weight, const paddle::Tensor& embedding_bias, const paddle::Tensor& position_encoding_table, paddle::Tensor& output_ids, paddle::Tensor& parent_ids, paddle::Tensor& sequence_length, std::string decoding_strategy, int beam_size, int topk, float topp, int head_num_, int size_per_head_, int num_layer_, int start_id_, int end_id_, int64_t max_seq_len_, float beam_search_diversity_rate_, float alpha, cublasHandle_t cublas_handle_, cudaStream_t stream) { int beam_width_ = (decoding_strategy == "beam_search" || decoding_strategy == "beam_search_v2") ? beam_size : 1; int candidate_num_ = (decoding_strategy == "topk_sampling" || decoding_strategy == "topp_sampling") ? topk : 1; float probability_threshold_ = (decoding_strategy == "topk_sampling" || decoding_strategy == "topp_sampling") ? 
topp : 0.0; auto input_dims = input.shape(); int batch_size_ = (decoding_strategy == "beam_search" || decoding_strategy == "beam_search_v2") ? input_dims[0] / beam_width_ : input_dims[0]; const int memory_max_seq_len = input_dims[1]; const int memory_hidden_dim = input_dims[2]; const int vocab_size = word_emb.shape()[0]; typedef PDTraits<D> traits_; typedef typename traits_::DataType DataType_; typedef typename traits_::data_t data_t_; DecodingInitParam<DataType_> decoding_params; decoding_params.cublas_handle = cublas_handle_; decoding_params.output_ids = output_ids.mutable_data<int>(input.place()); decoding_params.parent_ids = parent_ids.mutable_data<int>(input.place()); decoding_params.sequence_length = sequence_length.mutable_data<int>(input.place()); typedef DecoderTransformerTraits<traits_::OpType> DecodingTraits_; decoding_params.stream = stream; fastertransformer::Allocator<AllocatorType::PD> allocator_(stream); decoding_params.memory_tensor = reinterpret_cast<const DataType_*>(input.data<data_t_>()); decoding_params.memory_sequence_length = memory_sequence_length.data<int>(); DecoderInitParam<DataType_>* params = new DecoderInitParam<DataType_>[num_layer_]; for (int i = 0; i < num_layer_; i++) { params[i].stream = stream; params[i].cublas_handle = cublas_handle_; // self attn params[i].self_layernorm.gamma = reinterpret_cast<const DataType_*>( self_layernorm_weight[i].data<data_t_>()); params[i].self_layernorm.beta = reinterpret_cast<const DataType_*>( self_layernorm_bias[i].data<data_t_>()); // query params[i].self_attention.query_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_query_weight[i].data<data_t_>()); params[i].self_attention.query_weight.bias = reinterpret_cast<const DataType_*>( self_attn_query_bias[i].data<data_t_>()); // key params[i].self_attention.key_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_key_weight[i].data<data_t_>()); params[i].self_attention.key_weight.bias = reinterpret_cast<const DataType_*>( self_attn_key_bias[i].data<data_t_>()); // value params[i].self_attention.value_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_value_weight[i].data<data_t_>()); params[i].self_attention.value_weight.bias = reinterpret_cast<const DataType_*>( self_attn_value_bias[i].data<data_t_>()); // out proj params[i].self_attention.attention_output_weight.kernel = reinterpret_cast<const DataType_*>( self_attn_output_weight[i].data<data_t_>()); params[i].self_attention.attention_output_weight.bias = reinterpret_cast<const DataType_*>( self_attn_output_bias[i].data<data_t_>()); // cross params[i].cross_layernorm.gamma = reinterpret_cast<const DataType_*>( cross_layernorm_weight[i].data<data_t_>()); params[i].cross_layernorm.beta = reinterpret_cast<const DataType_*>( cross_layernorm_bias[i].data<data_t_>()); // query params[i].cross_attention.query_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_query_weight[i].data<data_t_>()); params[i].cross_attention.query_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_query_bias[i].data<data_t_>()); // key params[i].cross_attention.key_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_key_weight[i].data<data_t_>()); params[i].cross_attention.key_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_key_bias[i].data<data_t_>()); // value params[i].cross_attention.value_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_value_weight[i].data<data_t_>()); params[i].cross_attention.value_weight.bias = reinterpret_cast<const DataType_*>( 
cross_attn_value_bias[i].data<data_t_>()); // out proj params[i].cross_attention.attention_output_weight.kernel = reinterpret_cast<const DataType_*>( cross_attn_output_weight[i].data<data_t_>()); params[i].cross_attention.attention_output_weight.bias = reinterpret_cast<const DataType_*>( cross_attn_output_bias[i].data<data_t_>()); // ffn params[i].ffn_layernorm.gamma = reinterpret_cast<const DataType_*>( ffn_layernorm_weight[i].data<data_t_>()); params[i].ffn_layernorm.beta = reinterpret_cast<const DataType_*>( ffn_layernorm_bias[i].data<data_t_>()); // intermediate proj params[i].ffn.intermediate_weight.kernel = reinterpret_cast<const DataType_*>( ffn_intermediate_weight[i].data<data_t_>()); params[i].ffn.intermediate_weight.bias = reinterpret_cast<const DataType_*>( ffn_intermediate_bias[i].data<data_t_>()); // out proj params[i].ffn.output_weight.kernel = reinterpret_cast<const DataType_*>( ffn_output_weight[i].data<data_t_>()); params[i].ffn.output_weight.bias = reinterpret_cast<const DataType_*>(ffn_output_bias[i].data<data_t_>()); } decoding_params.layernorm.gamma = reinterpret_cast<const DataType_*>( decoder_layernorm_weight.data<data_t_>()); decoding_params.layernorm.beta = reinterpret_cast<const DataType_*>( decoder_layernorm_bias.data<data_t_>()); // for embedding decoding_params.embedding_table = reinterpret_cast<const DataType_*>(word_emb.data<data_t_>()); // for weight sharing matmul decoding_params.embedding_kernel = reinterpret_cast<const DataType_*>(embedding_weight.data<data_t_>()); // NOTE: the data type of the embedding bias for logits is different // between decoding with beam search and top-k/top-p sampling in // FasterTransformer when using float16. if ("beam_search" == decoding_strategy || "beam_search_v2" == decoding_strategy) { // for matmul bias decoding_params.embedding_bias = reinterpret_cast<const float*>(embedding_bias.data<float>()); } else if ("topk_sampling" == decoding_strategy || "topp_sampling" == decoding_strategy) { decoding_params.embedding_bias_T = reinterpret_cast<const DataType_*>(embedding_bias.data<data_t_>()); } decoding_params.position_encoding_table = reinterpret_cast<const DataType_*>( position_encoding_table.data<data_t_>()); if ("beam_search" == decoding_strategy) { DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beam_search_; decoding_beam_search_ = new DecodingBeamsearch<DecodingTraits_::OpType>( allocator_, batch_size_, beam_width_, max_seq_len_, head_num_, size_per_head_, vocab_size, num_layer_, memory_hidden_dim, memory_max_seq_len, start_id_, end_id_, beam_search_diversity_rate_); decoding_beam_search_->forward(params, decoding_params); delete decoding_beam_search_; } else if ("beam_search_v2" == decoding_strategy) { DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beam_search_; decoding_beam_search_ = new DecodingBeamsearch<DecodingTraits_::OpType>( allocator_, batch_size_, beam_width_, max_seq_len_, head_num_, size_per_head_, vocab_size, num_layer_, memory_hidden_dim, memory_max_seq_len, start_id_, end_id_, beam_search_diversity_rate_, true, // is_fuse_topk_softMax_ true, // keep_alive_beam_ alpha); decoding_beam_search_->forward(params, decoding_params); delete decoding_beam_search_; } else if ("topk_sampling" == decoding_strategy || "topp_sampling" == decoding_strategy) { DecodingSampling<DecodingTraits_::OpType>* decoding_sampling_; decoding_sampling_ = new DecodingSampling<DecodingTraits_::OpType>(allocator_, batch_size_, max_seq_len_, head_num_, size_per_head_, vocab_size, num_layer_, memory_hidden_dim, 
memory_max_seq_len, start_id_, end_id_, candidate_num_, probability_threshold_); decoding_sampling_->forward(params, decoding_params); delete decoding_sampling_; } else { PD_THROW( "Only beam_search, topk_sampling and topp_sampling are supported for " "FasterTransformer. "); } delete[] params; return {output_ids, parent_ids, sequence_length}; } std::vector<paddle::Tensor> DecodingCUDAForward( const paddle::Tensor& input, const paddle::Tensor& mem_seq_len, const paddle::Tensor& word_embedding, const std::vector<paddle::Tensor>& self_ln_weight, const std::vector<paddle::Tensor>& self_ln_bias, const std::vector<paddle::Tensor>& self_q_weight, const std::vector<paddle::Tensor>& self_q_bias, const std::vector<paddle::Tensor>& self_k_weight, const std::vector<paddle::Tensor>& self_k_bias, const std::vector<paddle::Tensor>& self_v_weight, const std::vector<paddle::Tensor>& self_v_bias, const std::vector<paddle::Tensor>& self_out_weight, const std::vector<paddle::Tensor>& self_out_bias, const std::vector<paddle::Tensor>& cross_ln_weight, const std::vector<paddle::Tensor>& cross_ln_bias, const std::vector<paddle::Tensor>& cross_q_weight, const std::vector<paddle::Tensor>& cross_q_bias, const std::vector<paddle::Tensor>& cross_k_weight, const std::vector<paddle::Tensor>& cross_k_bias, const std::vector<paddle::Tensor>& cross_v_weight, const std::vector<paddle::Tensor>& cross_v_bias, const std::vector<paddle::Tensor>& cross_out_weight, const std::vector<paddle::Tensor>& cross_out_bias, const std::vector<paddle::Tensor>& ffn_ln_weight, const std::vector<paddle::Tensor>& ffn_ln_bias, const std::vector<paddle::Tensor>& ffn_inter_weight, const std::vector<paddle::Tensor>& ffn_inter_bias, const std::vector<paddle::Tensor>& ffn_out_weight, const std::vector<paddle::Tensor>& ffn_out_bias, const paddle::Tensor& decoder_ln_weight, const paddle::Tensor& decoder_ln_bias, const paddle::Tensor& embedding_weight, const paddle::Tensor& embedding_bias, const paddle::Tensor& positional_embedding_weight, paddle::Tensor& output_ids, paddle::Tensor& parent_ids, paddle::Tensor& sequence_length, std::string decoding_strategy, int beam_size, int topk, float topp, int n_head, int size_per_head, int num_layer, int bos_id, int eos_id, int64_t max_len, float beam_search_diversity_rate, float alpha) { auto stream = input.stream(); cublasHandle_t cublas_handle_; cublasCreate(&cublas_handle_); cublasSetStream(cublas_handle_, stream); std::vector<paddle::Tensor> ret; switch (input.type()) { case paddle::DataType::FLOAT16: { ret = decoding_kernel<paddle::DataType::FLOAT16>( input, mem_seq_len, word_embedding, self_ln_weight, self_ln_bias, self_q_weight, self_q_bias, self_k_weight, self_k_bias, self_v_weight, self_v_bias, self_out_weight, self_out_bias, cross_ln_weight, cross_ln_bias, cross_q_weight, cross_q_bias, cross_k_weight, cross_k_bias, cross_v_weight, cross_v_bias, cross_out_weight, cross_out_bias, ffn_ln_weight, ffn_ln_bias, ffn_inter_weight, ffn_inter_bias, ffn_out_weight, ffn_out_bias, decoder_ln_weight, decoder_ln_bias, embedding_weight, embedding_bias, positional_embedding_weight, output_ids, parent_ids, sequence_length, decoding_strategy, beam_size, topk, topp, n_head, size_per_head, num_layer, bos_id, eos_id, max_len, beam_search_diversity_rate, alpha, cublas_handle_, stream); break; } case paddle::DataType::FLOAT32: { ret = decoding_kernel<paddle::DataType::FLOAT32>( input, mem_seq_len, word_embedding, self_ln_weight, self_ln_bias, self_q_weight, self_q_bias, self_k_weight, self_k_bias, self_v_weight, self_v_bias, 
self_out_weight, self_out_bias, cross_ln_weight, cross_ln_bias, cross_q_weight, cross_q_bias, cross_k_weight, cross_k_bias, cross_v_weight, cross_v_bias, cross_out_weight, cross_out_bias, ffn_ln_weight, ffn_ln_bias, ffn_inter_weight, ffn_inter_bias, ffn_out_weight, ffn_out_bias, decoder_ln_weight, decoder_ln_bias, embedding_weight, embedding_bias, positional_embedding_weight, output_ids, parent_ids, sequence_length, decoding_strategy, beam_size, topk, topp, n_head, size_per_head, num_layer, bos_id, eos_id, max_len, beam_search_diversity_rate, alpha, cublas_handle_, stream); break; } default: { PD_THROW( "NOT supported data type. " "Only float16 and float32 are supported. "); break; } } cublasDestroy(cublas_handle_); return ret; }
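DecodingCUDAForward above creates a cuBLAS handle, binds it to the input's stream, and destroys it on every call. One hedged way to keep that pairing leak-free on early returns is a small scope guard; this is a sketch, not part of the original extension.

#include <cublas_v2.h>
#include <cuda_runtime.h>

// Scope-bound cuBLAS handle following the create/set-stream/destroy sequence above.
class ScopedCublasHandle {
 public:
  explicit ScopedCublasHandle(cudaStream_t stream) {
    cublasCreate(&handle_);
    cublasSetStream(handle_, stream);
  }
  ~ScopedCublasHandle() { cublasDestroy(handle_); }
  cublasHandle_t get() const { return handle_; }
  ScopedCublasHandle(const ScopedCublasHandle&) = delete;
  ScopedCublasHandle& operator=(const ScopedCublasHandle&) = delete;
 private:
  cublasHandle_t handle_;
};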
35efb34749cf2b6471317750753f4f63aa02ace4.hip
// !!! This is a file automatically generated by hipify!!! /* * Rectangular matrix multiplication * A[M][K] * B[k][N] = C[M][N] * */ #include <stdio.h> #include <iostream> #include <exception> #include <fstream> #include <vector> #include <algorithm> #include <stdlib.h> #include <math.h> #include <sys/timeb.h> #include <string.h> #include <hip/hip_runtime.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "rocblas.h" using namespace std; /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } /* read timer in ms */ double read_timer_ms() { struct timeb tm; ftime(&tm); return (double) tm.time * 1000.0 + (double) tm.millitm; } #define REAL float void init(int M, int N, REAL * A) { int i, j; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { A[i*N+j] = (REAL) drand48(); } } } double maxerror(int M, int N, REAL * A, REAL *B) { int i, j; double error = 0.0; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { double diff = (A[i*N+j] - B[i*N+j]) / A[i*N+j]; if (diff < 0) diff = -diff; if (diff > error) error = diff; } } return error; } void matmul_base(int N, REAL *A, REAL * B, REAL *C); void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks); void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C, REAL *C_base); void matmul_cuda_v2_shmem(int N, REAL *A, REAL *B, REAL *C, REAL *C_base); void matmul_cuda_v3_cublas(int N, REAL *A, REAL *B, REAL *C, REAL *C_base); //kernel code for kernel version 1-global memory __global__ void cuda_vanilla(int N, REAL *A, REAL *B, REAL *C ) { int row= blockIdx.y*blockDim.y+threadIdx.y; int col= blockIdx.x*blockDim.x+threadIdx.x; if((row<N)&&(col<N)) { float sum = 0.0; #pragma unroll for(int i=0;i<N;++i) { sum += A[row*N+i]*B[col+i*N]; } __syncthreads(); C[row*N+col] = sum; } } //kernel code for kernel version 2-shared memory template <int BLOCK_SIZE> __global__ void cuda_shmem (float *C, float *A, float *B, int wA, int wB) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = wA * BLOCK_SIZE * by; int aEnd = aBegin + wA - 1; int aStep = BLOCK_SIZE; int bBegin = BLOCK_SIZE * bx; int bStep = BLOCK_SIZE * wB; float Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; __syncthreads(); #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } __syncthreads(); } int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } /* main function*/ int main(int argc, char *argv[]) { int N; int num_tasks = 5; /* 5 is default number of tasks */ double elapsed_base, elapsed_openmp; /* for timing */ if (argc < 2) { fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks); exit(1); } N = atoi(argv[1]); if (argc > 2) num_tasks = atoi(argv[2]); REAL * heap_buffer = (REAL*)malloc(sizeof(REAL)*N*N*4); REAL *A = heap_buffer; REAL *B = &heap_buffer[N*N]; REAL *C_base = &heap_buffer[2*N*N]; REAL *C_openmp = &heap_buffer[3*N*N]; REAL *C_v1 = (REAL *) malloc(N*N*4); REAL *C_v2 = (REAL *) malloc(N*N*4); REAL *C_v3 = (REAL *) malloc(N*N*4); srand48((1 << 12)); init(N, N, A); init(N, N, B); hipSetDevice(0); elapsed_base = read_timer(); matmul_base(N, A, B, C_base); elapsed_base = (read_timer() - elapsed_base); elapsed_openmp = read_timer(); matmul_openmp(N, A, B, C_openmp, 
num_tasks); elapsed_openmp = (read_timer() - elapsed_openmp); printf("\n\n\n======================================================================================================\n"); printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks); printf("------------------------------------------------------------------------------------------------------------\n"); printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\t\tError (compared to base)\n"); printf("------------------------------------------------------------------------------------------------------------\n"); printf("matmul_base:\t\t%4f\t%4f \t\t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, N, C_base, C_base)); printf("matmul_openmp:\t\t%4f\t%4f \t\t\t%g\n", elapsed_openmp * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)), maxerror(N, N, C_base, C_openmp)); printf("matmul_GPU:-------------------------------------------------------------------------------------------------\n"); matmul_cuda_v1_vanilla(N, A, B, C_v1, C_base);//call to version 1 function matmul_cuda_v2_shmem(N,A,B,C_v2,C_base);//call to version 2 function matmul_cuda_v3_cublas(N,A,B,C_v3,C_base);//call to version 3 function printf("------------------------------------------------------------------------------------------------------------\n\n\n"); free(heap_buffer); return 0; } void matmul_base(int N, REAL *A, REAL * B, REAL *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { REAL temp = 0.0; for (k = 0; k < N; k++) { temp += A[i*N+k] * B[k*N+j]; } C[i*N+j] = temp; } } } void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) { int i, j, k; #pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks) for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { REAL temp = 0.0; for (k = 0; k < N; k++) { temp += A[i*N+k] * B[k*N+j]; } C[i*N+j] = temp; } } } /* * call to version kernel function that uses GPU global memory */ void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C_v1, REAL *C_base) { int mem_size_A = sizeof(float)*N*N; int mem_size_B = sizeof(float)*N*N; int mem_size_C = sizeof(float)*N*N; float *d_A, *d_B, *d_C; hipMalloc((void **) &d_A, mem_size_A); hipMalloc((void **) &d_B, mem_size_B); hipMalloc((void **) &d_C, mem_size_C); hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, mem_size_B, hipMemcpyHostToDevice); dim3 dimgrid ((N-1)/16+1,(N-1)/16+1,1); dim3 dimblock (16,16,1); // Performs warmup operation using kernel hipLaunchKernelGGL(( cuda_vanilla) , dim3(dimgrid),dim3(dimblock), 0, 0, N, d_A, d_B, d_C); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; hipEventCreate(&start); hipEvent_t stop; hipEventCreate(&stop); // Record the start event hipEventRecord(start, NULL); // Execute the kernel - acessing global memory hipLaunchKernelGGL(( cuda_vanilla) , dim3(dimgrid),dim3(dimblock), 0, 0, N, d_A, d_B, d_C); // Record the stop event hipEventRecord(stop, NULL); // Wait for the stop event to complete hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); // Computing the performance double elapsed_cuda_v1 = msecTotal;//time double flopsPerMatrixMul_v1 = 2.0 *N*N*N; double gigaFlops_v1 = (flopsPerMatrixMul_v1 * 1.0e-9f) / (elapsed_cuda_v1 / 1000.0f);//performance // Copy result from device to host hipMemcpy(C_v1, d_C, mem_size_C, hipMemcpyDeviceToHost); // Clean up memory hipFree(d_A); hipFree(d_B); 
hipFree(d_C); hipDeviceReset(); printf("GlobalMemory_V1:\t%4f\t%4fx10^3 \t\t\t%g\n", elapsed_cuda_v1, gigaFlops_v1, maxerror(N, N, C_base, C_v1)); } /* * call to kernel version 2 that use GPU shared memory */ void matmul_cuda_v2_shmem(int N, REAL *A, REAL *B, REAL *C_v2, REAL *C_base) { int block_size = 16; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); dimsA.x = N; dimsA.y = N; dimsB.x = N; dimsB.y = N; int mem_size_A = sizeof(float)*N*N; int mem_size_B = sizeof(float)*N*N; int mem_size_C = sizeof(float)*N*N; float *d_A, *d_B, *d_C; hipMalloc((void **) &d_A, mem_size_A); hipMalloc((void **) &d_B, mem_size_B); hipMalloc((void **) &d_C, mem_size_C); hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, mem_size_B, hipMemcpyHostToDevice); dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Performs warmup operation using matrixMul CUDA kernel hipLaunchKernelGGL(( cuda_shmem<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; hipEventCreate(&start); hipEvent_t stop; hipEventCreate(&stop); // Record the start event hipEventRecord(start, NULL); // Executing the kernel hipLaunchKernelGGL(( cuda_shmem<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); // Record the stop event hipEventRecord(stop, NULL); // Wait for the stop event to complete hipEventSynchronize(stop); // Computing the performance float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); double elapsed_cuda_v2 = msecTotal;//time double flopsPerMatrixMul_v2 = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops_v2 = (flopsPerMatrixMul_v2 * 1.0e-9f) / (elapsed_cuda_v2 / 1000.0f);//performance // Copy result from device to host hipMemcpy(C_v2, d_C, mem_size_C, hipMemcpyDeviceToHost); // Clean up memory hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); printf("SharedMemory_V2:\t%4f\t%4fx10^3\t\t\t%g\n", elapsed_cuda_v2, gigaFlops_v2, maxerror(N, N, C_base, C_v2)); } /* * call to sgemm of cublas library */ void matmul_cuda_v3_cublas(int N, REAL *A, REAL *B, REAL *C_v3, REAL *C_base) { int mem_size_A = sizeof(float)*N*N; int mem_size_B = sizeof(float)*N*N; int mem_size_C = sizeof(float)*N*N; float *d_A, *d_B, *d_C; hipMalloc((void **) &d_A, mem_size_A); hipMalloc((void **) &d_B, mem_size_B); hipMalloc((void **) &d_C, mem_size_C); hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, B, mem_size_B, hipMemcpyHostToDevice); //initialize the cublas parameters const float alpha = 1.0f; const float beta = 0.0f; // Create the cuBLAS handle hipblasHandle_t handle; hipblasCreate(&handle); //Perform warmup operation with cublas hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_B, N, d_A, N, &beta, d_C, N); // Allocate CUDA events that we'll use for timing hipEvent_t start; hipEventCreate(&start); hipEvent_t stop; hipEventCreate(&stop); // Record the start event hipEventRecord(start, NULL); // Execute the matrix-vector multiplication hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_B, N, d_A, N, &beta, d_C, N); // Record the stop event hipEventRecord(stop, NULL); // Wait for the stop event to complete hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); // Computing the performance double elapsed_cuda_v3 = msecTotal;//time double flopsPerMatrixMul_v3 = 2.0 *N*N*N; 
double gigaFlops_v3 = (flopsPerMatrixMul_v3 * 1.0e-9f) / (elapsed_cuda_v3 / 1000.0f); //performance
    // Copy result from device to host
    hipMemcpy(C_v3, d_C, mem_size_C, hipMemcpyDeviceToHost);
    // Clean up memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    hipblasDestroy(handle);
    hipDeviceReset();
    printf("cuBLAS_V3:\t\t%4f\t%4fx10^3 \t\t\t%g\n", elapsed_cuda_v3, gigaFlops_v3, maxerror(N, N, C_base, C_v3));
}
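
Every HIP runtime call in the file above discards its status code. Below is a minimal error-checking sketch; the HIP_CHECK macro name is an assumption and is not part of the original benchmark, and only standard HIP runtime calls (hipGetErrorString, hipGetLastError) are used.

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

// Hypothetical helper, not in the original file: abort with a readable
// message whenever a HIP runtime call does not return hipSuccess.
#define HIP_CHECK(call)                                                   \
    do {                                                                  \
        hipError_t err_ = (call);                                         \
        if (err_ != hipSuccess) {                                         \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                  \
                    hipGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

// Usage, wrapping calls from the benchmark above:
//   HIP_CHECK(hipMalloc((void **)&d_A, mem_size_A));
//   HIP_CHECK(hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice));
//   hipLaunchKernelGGL(cuda_vanilla, dimgrid, dimblock, 0, 0, N, d_A, d_B, d_C);
//   HIP_CHECK(hipGetLastError());
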
35efb34749cf2b6471317750753f4f63aa02ace4.cu
/* * Rectangular matrix multiplication * A[M][K] * B[k][N] = C[M][N] * */ #include <stdio.h> #include <iostream> #include <exception> #include <fstream> #include <vector> #include <algorithm> #include <stdlib.h> #include <math.h> #include <sys/timeb.h> #include <string.h> #include <cuda.h> #include <assert.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include "cublas_v2.h" using namespace std; /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } /* read timer in ms */ double read_timer_ms() { struct timeb tm; ftime(&tm); return (double) tm.time * 1000.0 + (double) tm.millitm; } #define REAL float void init(int M, int N, REAL * A) { int i, j; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { A[i*N+j] = (REAL) drand48(); } } } double maxerror(int M, int N, REAL * A, REAL *B) { int i, j; double error = 0.0; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { double diff = (A[i*N+j] - B[i*N+j]) / A[i*N+j]; if (diff < 0) diff = -diff; if (diff > error) error = diff; } } return error; } void matmul_base(int N, REAL *A, REAL * B, REAL *C); void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks); void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C, REAL *C_base); void matmul_cuda_v2_shmem(int N, REAL *A, REAL *B, REAL *C, REAL *C_base); void matmul_cuda_v3_cublas(int N, REAL *A, REAL *B, REAL *C, REAL *C_base); //kernel code for kernel version 1-global memory __global__ void cuda_vanilla(int N, REAL *A, REAL *B, REAL *C ) { int row= blockIdx.y*blockDim.y+threadIdx.y; int col= blockIdx.x*blockDim.x+threadIdx.x; if((row<N)&&(col<N)) { float sum = 0.0; #pragma unroll for(int i=0;i<N;++i) { sum += A[row*N+i]*B[col+i*N]; } __syncthreads(); C[row*N+col] = sum; } } //kernel code for kernel version 2-shared memory template <int BLOCK_SIZE> __global__ void cuda_shmem (float *C, float *A, float *B, int wA, int wB) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = wA * BLOCK_SIZE * by; int aEnd = aBegin + wA - 1; int aStep = BLOCK_SIZE; int bBegin = BLOCK_SIZE * bx; int bStep = BLOCK_SIZE * wB; float Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; __syncthreads(); #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } __syncthreads(); } int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } /* main function*/ int main(int argc, char *argv[]) { int N; int num_tasks = 5; /* 5 is default number of tasks */ double elapsed_base, elapsed_openmp; /* for timing */ if (argc < 2) { fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks); exit(1); } N = atoi(argv[1]); if (argc > 2) num_tasks = atoi(argv[2]); REAL * heap_buffer = (REAL*)malloc(sizeof(REAL)*N*N*4); REAL *A = heap_buffer; REAL *B = &heap_buffer[N*N]; REAL *C_base = &heap_buffer[2*N*N]; REAL *C_openmp = &heap_buffer[3*N*N]; REAL *C_v1 = (REAL *) malloc(N*N*4); REAL *C_v2 = (REAL *) malloc(N*N*4); REAL *C_v3 = (REAL *) malloc(N*N*4); srand48((1 << 12)); init(N, N, A); init(N, N, B); cudaSetDevice(0); elapsed_base = read_timer(); matmul_base(N, A, B, C_base); elapsed_base = (read_timer() - elapsed_base); elapsed_openmp = read_timer(); matmul_openmp(N, A, B, C_openmp, num_tasks); elapsed_openmp = (read_timer() - elapsed_openmp); 
printf("\n\n\n======================================================================================================\n"); printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks); printf("------------------------------------------------------------------------------------------------------------\n"); printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\t\tError (compared to base)\n"); printf("------------------------------------------------------------------------------------------------------------\n"); printf("matmul_base:\t\t%4f\t%4f \t\t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, N, C_base, C_base)); printf("matmul_openmp:\t\t%4f\t%4f \t\t\t%g\n", elapsed_openmp * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)), maxerror(N, N, C_base, C_openmp)); printf("matmul_GPU:-------------------------------------------------------------------------------------------------\n"); matmul_cuda_v1_vanilla(N, A, B, C_v1, C_base);//call to version 1 function matmul_cuda_v2_shmem(N,A,B,C_v2,C_base);//call to version 2 function matmul_cuda_v3_cublas(N,A,B,C_v3,C_base);//call to version 3 function printf("------------------------------------------------------------------------------------------------------------\n\n\n"); free(heap_buffer); return 0; } void matmul_base(int N, REAL *A, REAL * B, REAL *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { REAL temp = 0.0; for (k = 0; k < N; k++) { temp += A[i*N+k] * B[k*N+j]; } C[i*N+j] = temp; } } } void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) { int i, j, k; #pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks) for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { REAL temp = 0.0; for (k = 0; k < N; k++) { temp += A[i*N+k] * B[k*N+j]; } C[i*N+j] = temp; } } } /* * call to version kernel function that uses GPU global memory */ void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C_v1, REAL *C_base) { int mem_size_A = sizeof(float)*N*N; int mem_size_B = sizeof(float)*N*N; int mem_size_C = sizeof(float)*N*N; float *d_A, *d_B, *d_C; cudaMalloc((void **) &d_A, mem_size_A); cudaMalloc((void **) &d_B, mem_size_B); cudaMalloc((void **) &d_C, mem_size_C); cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice); dim3 dimgrid ((N-1)/16+1,(N-1)/16+1,1); dim3 dimblock (16,16,1); // Performs warmup operation using kernel cuda_vanilla <<<dimgrid,dimblock>>> (N, d_A, d_B, d_C); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; cudaEventCreate(&start); cudaEvent_t stop; cudaEventCreate(&stop); // Record the start event cudaEventRecord(start, NULL); // Execute the kernel - acessing global memory cuda_vanilla <<<dimgrid,dimblock>>> (N, d_A, d_B, d_C); // Record the stop event cudaEventRecord(stop, NULL); // Wait for the stop event to complete cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); // Computing the performance double elapsed_cuda_v1 = msecTotal;//time double flopsPerMatrixMul_v1 = 2.0 *N*N*N; double gigaFlops_v1 = (flopsPerMatrixMul_v1 * 1.0e-9f) / (elapsed_cuda_v1 / 1000.0f);//performance // Copy result from device to host cudaMemcpy(C_v1, d_C, mem_size_C, cudaMemcpyDeviceToHost); // Clean up memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); printf("GlobalMemory_V1:\t%4f\t%4fx10^3 \t\t\t%g\n", elapsed_cuda_v1, gigaFlops_v1, 
maxerror(N, N, C_base, C_v1)); } /* * call to kernel version 2 that use GPU shared memory */ void matmul_cuda_v2_shmem(int N, REAL *A, REAL *B, REAL *C_v2, REAL *C_base) { int block_size = 16; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); dimsA.x = N; dimsA.y = N; dimsB.x = N; dimsB.y = N; int mem_size_A = sizeof(float)*N*N; int mem_size_B = sizeof(float)*N*N; int mem_size_C = sizeof(float)*N*N; float *d_A, *d_B, *d_C; cudaMalloc((void **) &d_A, mem_size_A); cudaMalloc((void **) &d_B, mem_size_B); cudaMalloc((void **) &d_C, mem_size_C); cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice); dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Performs warmup operation using matrixMul CUDA kernel cuda_shmem<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; cudaEventCreate(&start); cudaEvent_t stop; cudaEventCreate(&stop); // Record the start event cudaEventRecord(start, NULL); // Executing the kernel cuda_shmem<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); // Record the stop event cudaEventRecord(stop, NULL); // Wait for the stop event to complete cudaEventSynchronize(stop); // Computing the performance float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); double elapsed_cuda_v2 = msecTotal;//time double flopsPerMatrixMul_v2 = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops_v2 = (flopsPerMatrixMul_v2 * 1.0e-9f) / (elapsed_cuda_v2 / 1000.0f);//performance // Copy result from device to host cudaMemcpy(C_v2, d_C, mem_size_C, cudaMemcpyDeviceToHost); // Clean up memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); printf("SharedMemory_V2:\t%4f\t%4fx10^3\t\t\t%g\n", elapsed_cuda_v2, gigaFlops_v2, maxerror(N, N, C_base, C_v2)); } /* * call to sgemm of cublas library */ void matmul_cuda_v3_cublas(int N, REAL *A, REAL *B, REAL *C_v3, REAL *C_base) { int mem_size_A = sizeof(float)*N*N; int mem_size_B = sizeof(float)*N*N; int mem_size_C = sizeof(float)*N*N; float *d_A, *d_B, *d_C; cudaMalloc((void **) &d_A, mem_size_A); cudaMalloc((void **) &d_B, mem_size_B); cudaMalloc((void **) &d_C, mem_size_C); cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice); //initialize the cublas parameters const float alpha = 1.0f; const float beta = 0.0f; // Create the cuBLAS handle cublasHandle_t handle; cublasCreate(&handle); //Perform warmup operation with cublas cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_B, N, d_A, N, &beta, d_C, N); // Allocate CUDA events that we'll use for timing cudaEvent_t start; cudaEventCreate(&start); cudaEvent_t stop; cudaEventCreate(&stop); // Record the start event cudaEventRecord(start, NULL); // Execute the matrix-vector multiplication cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_B, N, d_A, N, &beta, d_C, N); // Record the stop event cudaEventRecord(stop, NULL); // Wait for the stop event to complete cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); // Computing the performance double elapsed_cuda_v3 = msecTotal;//time double flopsPerMatrixMul_v3 = 2.0 *N*N*N; double gigaFlops_v3 = (flopsPerMatrixMul_v3 * 1.0e-9f) / (elapsed_cuda_v3 / 1000.0f);//performance // Copy result from device to host cudaMemcpy(C_v3, d_C, 
mem_size_C, cudaMemcpyDeviceToHost);
    // Clean up memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cublasDestroy(handle);
    cudaDeviceReset();
    printf("cuBLAS_V3:\t\t%4f\t%4fx10^3 \t\t\t%g\n", elapsed_cuda_v3, gigaFlops_v3, maxerror(N, N, C_base, C_v3));
}
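
The cublasSgemm call above relies on a standard layout trick: cuBLAS assumes column-major storage, so passing d_B and d_A in swapped order makes the library compute B^T * A^T = (A*B)^T, which is exactly C = A*B when C is read back as row-major. A short sketch of the general rectangular case follows; the function name sgemm_row_major is an assumption, and only cublasSgemm from the cuBLAS v2 API is used.

#include <cublas_v2.h>

// Row-major C[M][N] = A[M][K] * B[K][N] via the column-major cuBLAS routine.
void sgemm_row_major(cublasHandle_t handle, int M, int N, int K,
                     const float *d_A,   // row-major M x K
                     const float *d_B,   // row-major K x N
                     float *d_C)         // row-major M x N
{
    const float alpha = 1.0f, beta = 0.0f;
    // cuBLAS sees the row-major buffers as their transposes, so ask it for
    // B^T * A^T: m = columns of C, n = rows of C, k = inner dimension.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K,
                &alpha,
                d_B, N,    // B^T as seen by cuBLAS, ld = N
                d_A, K,    // A^T as seen by cuBLAS, ld = K
                &beta,
                d_C, N);   // C^T as seen by cuBLAS, ld = N; row-major C on readback
}

For the square case used in the benchmark (M = N = K), this reduces to the exact call in the file: cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_B, N, d_A, N, &beta, d_C, N).
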
19709014c543e72b0fd87d79fa3272b201501f85.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "src/gpu_utils/cuda_settings.h" #include "src/gpu_utils/cuda_helper_functions.cuh" #include "src/gpu_utils/cuda_kernels/BP.cuh" #include "src/macros.h" #include "src/error.h" long int makeJobsForDiff2Fine( OptimisationParamters &op, SamplingParameters &sp, long int orientation_num, long int translation_num, ProjectionParams &FineProjectionData, std::vector< long unsigned > &iover_transes, std::vector< long unsigned > &ihiddens, long int nr_over_orient, long int nr_over_trans, int ipart, IndexedDataArray &FPW, // FPW=FinePassWeights IndexedDataArrayMask &dataMask, int chunk) { long int w_base = dataMask.firstPos, w(0), k(0); // be on the safe side with the jobArrays: make them as large as they could possibly be // (this will be reduced at exit of this function) dataMask.setNumberOfJobs(orientation_num*translation_num); dataMask.setNumberOfWeights(orientation_num*translation_num); dataMask.jobOrigin.host_alloc(); dataMask.jobExtent.host_alloc(); dataMask.jobOrigin[k]=0; for (long unsigned i = 0; i < orientation_num; i++) { dataMask.jobExtent[k]=0; int tk=0; long int iover_rot = FineProjectionData.iover_rots[i]; for (long unsigned j = 0; j < translation_num; j++) { long int iover_trans = iover_transes[j]; long int ihidden = FineProjectionData.iorientclasses[i] * sp.nr_trans + ihiddens[j]; if(DIRECT_A2D_ELEM(op.Mcoarse_significant, ipart, ihidden)==1) { FPW.rot_id[w_base+w] = FineProjectionData.iorientclasses[i] % (sp.nr_dir*sp.nr_psi); // where to look for priors etc FPW.rot_idx[w_base+w] = i; // which rot for this significant task FPW.trans_idx[w_base+w] = j; // which trans - || - FPW.ihidden_overs[w_base+w]= (ihidden * nr_over_orient + iover_rot) * nr_over_trans + iover_trans; if(tk>=chunk) { tk=0; // reset counter k++; // use new element dataMask.jobOrigin[k]=w; dataMask.jobExtent[k]=0; // prepare next element for ++ incrementing } tk++; // increment limit-checker dataMask.jobExtent[k]++; // increment number of transes this job w++; } else if(tk!=0) // start a new one with the same rotidx - we expect transes to be sequential. 
{ tk=0; // reset counter k++; // use new element dataMask.jobOrigin[k]=w; dataMask.jobExtent[k]=0; // prepare next element for ++ incrementing } } if(tk>0) // use new element (if tk==0) then we are currently on an element with no signif, so we should continue using this element { k++; dataMask.jobOrigin[k]=w; dataMask.jobExtent[k]=0; } } if(dataMask.jobExtent[k]!=0) // if we started putting somehting in last element, then the count is one higher than the index k+=1; dataMask.setNumberOfJobs(k); dataMask.setNumberOfWeights(w); // if(dataMask.weightNum>0) // { // dataMask.jobOrigin.device_alloc(); // dataMask.jobExtent.device_alloc(); // } return(w); } int makeJobsForCollect(IndexedDataArray &FPW, IndexedDataArrayMask &dataMask, unsigned long NewJobNum) // FPW=FinePassWeights { // reset the old (diff2Fine) job-definitions // dataMask.jobOrigin.free_host(); // dataMask.jobOrigin.free_device(); // dataMask.jobExtent.free_host(); // dataMask.jobExtent.free_device(); dataMask.setNumberOfJobs(NewJobNum); // dataMask.jobOrigin.host_alloc(); // dataMask.jobExtent.host_alloc(); long int jobid=0; dataMask.jobOrigin[jobid]=0; dataMask.jobExtent[jobid]=1; long int crot =FPW.rot_idx[jobid]; // set current rot for(long int n=1; n<FPW.rot_idx.size; n++) { if(FPW.rot_idx[n]==crot) { dataMask.jobExtent[jobid]++; } else { jobid++; dataMask.jobExtent[jobid]=1; dataMask.jobOrigin[jobid]=n; crot=FPW.rot_idx[n]; } } dataMask.setNumberOfJobs(jobid+1); // because max index is one less than size // dataMask.jobOrigin.put_on_device(); // dataMask.jobExtent.put_on_device(); return (jobid+1); } /* * Maps weights to a decoupled indexing of translations and orientations */ void mapWeights( unsigned long orientation_start, XFLOAT *mapped_weights, unsigned orientation_num, unsigned long idxArr_start, unsigned long idxArr_end, unsigned translation_num, XFLOAT *weights, long unsigned *rot_idx, long unsigned *trans_idx, unsigned long current_oversampling) { for (long unsigned i = 0; i < orientation_num*translation_num; i++) mapped_weights[i] = -999.; for (long unsigned i = idxArr_start; i < idxArr_end; i++) mapped_weights[ (rot_idx[i]-orientation_start) * translation_num + trans_idx[i] ]= weights[i]; } void buildCorrImage(MlOptimiser *baseMLO, OptimisationParamters &op, CudaGlobalPtr<XFLOAT> &corr_img, long int ipart, long int group_id) { // CC or not if((baseMLO->iter == 1 && baseMLO->do_firstiter_cc) || baseMLO->do_always_cc) for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] = 1. 
/ (op.local_sqrtXi2[ipart]*op.local_sqrtXi2[ipart]); else for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] = *(op.local_Minvsigma2s[ipart].data + i ); // ctf-correction or not ( NOTE this is not were the difference metric is ctf-corrected, but // rather where we apply the additional correction to make the GPU-specific arithmetic equal // to the CPU method) if (baseMLO->do_ctf_correction && baseMLO->refs_are_ctf_corrected) for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] *= DIRECT_MULTIDIM_ELEM(op.local_Fctfs[ipart], i)*DIRECT_MULTIDIM_ELEM(op.local_Fctfs[ipart], i); // scale-correction or not ( NOTE this is not were the difference metric is scale-corrected, but // rather where we apply the additional correction to make the GPU-specific arithmetic equal // to the CPU method) XFLOAT myscale = baseMLO->mymodel.scale_correction[group_id]; if (baseMLO->do_scale_correction) for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] *= myscale * myscale; } void generateEulerMatrices( XFLOAT padding_factor, ProjectionParams &ProjectionData, XFLOAT *eulers, bool inverse) { RFLOAT alpha, beta, gamma; RFLOAT ca, sa, cb, sb, cg, sg; RFLOAT cc, cs, sc, ss; for (long int i = 0; i < ProjectionData.rots.size(); i++) { //TODO In a sense we're doing RAD2DEG just to do DEG2RAD here. //The only place the degree value is actually used is in the metadata assignment. alpha = DEG2RAD(ProjectionData.rots[i]); beta = DEG2RAD(ProjectionData.tilts[i]); gamma = DEG2RAD(ProjectionData.psis[i]); sincos(alpha, &sa, &ca); sincos(beta, &sb, &cb); sincos(gamma, &sg, &cg); cc = cb * ca; cs = cb * sa; sc = sb * ca; ss = sb * sa; if(inverse) { eulers[9 * i + 0] = ( cg * cc - sg * sa) ;// * padding_factor; //00 eulers[9 * i + 1] = (-sg * cc - cg * sa) ;// * padding_factor; //10 eulers[9 * i + 2] = ( sc ) ;// * padding_factor; //20 eulers[9 * i + 3] = ( cg * cs + sg * ca) ;// * padding_factor; //01 eulers[9 * i + 4] = (-sg * cs + cg * ca) ;// * padding_factor; //11 eulers[9 * i + 5] = ( ss ) ;// * padding_factor; //21 eulers[9 * i + 6] = (-cg * sb ) ;// * padding_factor; //02 eulers[9 * i + 7] = ( sg * sb ) ;// * padding_factor; //12 eulers[9 * i + 8] = ( cb ) ;// * padding_factor; //22 } else { eulers[9 * i + 0] = ( cg * cc - sg * sa) ;// * padding_factor; //00 eulers[9 * i + 1] = ( cg * cs + sg * ca) ;// * padding_factor; //01 eulers[9 * i + 2] = (-cg * sb ) ;// * padding_factor; //02 eulers[9 * i + 3] = (-sg * cc - cg * sa) ;// * padding_factor; //10 eulers[9 * i + 4] = (-sg * cs + cg * ca) ;// * padding_factor; //11 eulers[9 * i + 5] = ( sg * sb ) ;// * padding_factor; //12 eulers[9 * i + 6] = ( sc ) ;// * padding_factor; //20 eulers[9 * i + 7] = ( ss ) ;// * padding_factor; //21 eulers[9 * i + 8] = ( cb ) ;// * padding_factor; //22 } } } long unsigned generateProjectionSetupFine( OptimisationParamters &op, SamplingParameters &sp, MlOptimiser *baseMLO, unsigned iclass, ProjectionParams &ProjectionData) // FIXME : For coarse iteration this is **SLOW** HERE ARE SOME NOTES FOR PARALLELIZING IT (GPU OFFLOAD): /* * Since it is based on push_back, parallelizing sould be fine given som atomic opreation appends, * what takes time is looping through all this. The job-splitting in collect2jobs-preproccesing and * divideOrientationsIntoBlockjobs() relies on chunks of shared orientations being adjacent in * ProjectionData.rot_id (and thus also .rot_idx), but does not care which order those chunks appear * in. 
So as long as a parallelilsm and "atomic push_back" is organised to use an orientation as a * minimum unit, the job-splitting should be fine with the output. */ { //Local variables std::vector< RFLOAT > oversampled_rot, oversampled_tilt, oversampled_psi; long int orientation_num = 0; for (long int idir = sp.idir_min, iorient = 0; idir <= sp.idir_max; idir++) { for (long int ipsi = sp.ipsi_min, ipart = 0; ipsi <= sp.ipsi_max; ipsi++, iorient++) { long int iorientclass = iclass * sp.nr_dir * sp.nr_psi + iorient; if (baseMLO->isSignificantAnyParticleAnyTranslation(iorientclass, sp.itrans_min, sp.itrans_max, op.Mcoarse_significant)) { // Now get the oversampled (rot, tilt, psi) triplets // This will be only the original (rot,tilt,psi) triplet in the first pass (sp.current_oversampling==0) baseMLO->sampling.getOrientations(idir, ipsi, sp.current_oversampling, oversampled_rot, oversampled_tilt, oversampled_psi, op.pointer_dir_nonzeroprior, op.directions_prior, op.pointer_psi_nonzeroprior, op.psi_prior); // Loop over all oversampled orientations (only a single one in the first pass) for (long int iover_rot = 0; iover_rot < sp.nr_oversampled_rot; iover_rot++, ipart++) { ProjectionData.pushBackAll( (long unsigned)iclass, oversampled_rot[iover_rot], oversampled_tilt[iover_rot], oversampled_psi[iover_rot], iorientclass, iover_rot ); orientation_num ++; } } } } ProjectionData.orientation_num[iclass]=orientation_num; return orientation_num; } void runWavgKernel( CudaProjectorKernel &projector, XFLOAT *eulers, XFLOAT *Fimg_real, XFLOAT *Fimg_imag, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT *sorted_weights, XFLOAT *ctfs, XFLOAT *wdiff2s_parts, XFLOAT *wdiff2s_AA, XFLOAT *wdiff2s_XA, OptimisationParamters &op, long unsigned orientation_num, long unsigned translation_num, unsigned image_size, long int ipart, int group_id, int exp_iclass, XFLOAT part_scale, bool refs_are_ctf_corrected, bool data_is_3D, hipStream_t stream) { //We only want as many blocks as there are chunks of orientations to be treated //within the same block (this is done to reduce memory loads in the kernel). 
dim3 block_dim = orientation_num;//ceil((float)orientation_num/(float)REF_GROUP_SIZE); //hipFuncSetCacheConfig(cuda_kernel_wavg_fast, hipFuncCachePreferShared); if (refs_are_ctf_corrected) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_wavg<true,true,true,WAVG_BLOCK_SIZE_DATA3D>), dim3(block_dim),dim3(WAVG_BLOCK_SIZE_DATA3D),(3*WAVG_BLOCK_SIZE_DATA3D+9)*sizeof(XFLOAT),stream, eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else if (projector.mdlZ!=0) hipLaunchKernelGGL(( cuda_kernel_wavg<true,true,false,WAVG_BLOCK_SIZE>), dim3(block_dim),dim3(WAVG_BLOCK_SIZE),(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream, eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else hipLaunchKernelGGL(( cuda_kernel_wavg<true,false,false,WAVG_BLOCK_SIZE>), dim3(block_dim),dim3(WAVG_BLOCK_SIZE),(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream, eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); } else { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_wavg<false,true,true,WAVG_BLOCK_SIZE_DATA3D>), dim3(block_dim),dim3(WAVG_BLOCK_SIZE_DATA3D),(3*WAVG_BLOCK_SIZE_DATA3D+9)*sizeof(XFLOAT),stream, eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else if (projector.mdlZ!=0) hipLaunchKernelGGL(( cuda_kernel_wavg<false,true,false,WAVG_BLOCK_SIZE>), dim3(block_dim),dim3(WAVG_BLOCK_SIZE),(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream, eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else hipLaunchKernelGGL(( cuda_kernel_wavg<false,false,false,WAVG_BLOCK_SIZE>), dim3(block_dim),dim3(WAVG_BLOCK_SIZE),(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream, eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); } LAUNCH_HANDLE_ERROR(hipGetLastError()); } void runBackProjectKernel( CudaBackprojector &BP, CudaProjectorKernel &projector, XFLOAT *d_img_real, XFLOAT *d_img_imag, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT* d_weights, XFLOAT* d_Minvsigma2s, XFLOAT* d_ctfs, unsigned long translation_num, XFLOAT significant_weight, XFLOAT weight_norm, XFLOAT *d_eulers, int imgX, int imgY, int imgZ, unsigned long imageCount, bool data_is_3D, bool do_sgd, hipStream_t optStream) { if(BP.mdlZ==1) { hipLaunchKernelGGL(( cuda_kernel_backproject2D), dim3(imageCount),dim3(BP_2D_BLOCK_SIZE),0,optStream, d_img_real, d_img_imag, trans_x, trans_y, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, 
weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgX*imgY, BP.mdlX, BP.mdlInitY); LAUNCH_HANDLE_ERROR(hipGetLastError()); } else { if(do_sgd) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_backprojectSGD<true>), dim3(imageCount),dim3(BP_DATA3D_BLOCK_SIZE),0,optStream, projector, d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); else hipLaunchKernelGGL(( cuda_kernel_backprojectSGD<false>), dim3(imageCount),dim3(BP_REF3D_BLOCK_SIZE),0,optStream, projector, d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); } else { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_backproject3D<true>), dim3(imageCount),dim3(BP_DATA3D_BLOCK_SIZE),0,optStream, d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); else hipLaunchKernelGGL(( cuda_kernel_backproject3D<false>), dim3(imageCount),dim3(BP_REF3D_BLOCK_SIZE),0,optStream, d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); } LAUNCH_HANDLE_ERROR(hipGetLastError()); } } __global__ void cuda_kernel_allweights_to_mweights( unsigned long * d_iorient, XFLOAT * d_allweights, XFLOAT * d_mweights, unsigned long orientation_num, unsigned long translation_num ) { size_t idx = blockIdx.x * WEIGHT_MAP_BLOCK_SIZE + threadIdx.x; if (idx < orientation_num*translation_num) d_mweights[d_iorient[idx/translation_num] * translation_num + idx%translation_num] = d_allweights[idx]; //d_mweights[idx/translation_num] = //d_allweights[idx/translation_num * translation_num + idx%translation_num]; } void mapAllWeightsToMweights( unsigned long * d_iorient, //projectorPlan.iorientclasses XFLOAT * d_allweights, //allWeights XFLOAT * d_mweights, //Mweight unsigned long orientation_num, //projectorPlan.orientation_num unsigned long translation_num, //translation_num hipStream_t stream ) { int grid_size = ceil((float)(orientation_num*translation_num)/(float)WEIGHT_MAP_BLOCK_SIZE); hipLaunchKernelGGL(( cuda_kernel_allweights_to_mweights), dim3(grid_size), dim3(WEIGHT_MAP_BLOCK_SIZE), 0, stream , d_iorient, d_allweights, d_mweights, orientation_num, translation_num); LAUNCH_HANDLE_ERROR(hipGetLastError()); } void compareWeights( unsigned long * d_iorient, //projectorPlan.iorientclasses XFLOAT * d_allweights, //allWeights XFLOAT * d_mweights, //Mweight unsigned long orientation_num, //projectorPlan.orientation_num unsigned long translation_num, //translation_num hipStream_t stream ) { int grid_size = ceil((float)(orientation_num*translation_num)/(float)WEIGHT_MAP_BLOCK_SIZE); hipLaunchKernelGGL(( 
cuda_kernel_allweights_to_mweights), dim3(grid_size), dim3(WEIGHT_MAP_BLOCK_SIZE), 0, stream , d_iorient, d_allweights, d_mweights, orientation_num, translation_num); LAUNCH_HANDLE_ERROR(hipGetLastError()); } size_t findThresholdIdxInCumulativeSum(CudaGlobalPtr<XFLOAT> &data, XFLOAT threshold) { int grid_size = ceil((float)(data.getSize()-1)/(float)FIND_IN_CUMULATIVE_BLOCK_SIZE); if(grid_size==0) { return(0); } else { CudaGlobalPtr<size_t > idx(1, data.getStream(), data.getAllocator()); idx[0] = 0; idx.put_on_device(); hipLaunchKernelGGL(( cuda_kernel_find_threshold_idx_in_cumulative), dim3(grid_size), dim3(FIND_IN_CUMULATIVE_BLOCK_SIZE), 0, data.getStream() , ~data, threshold, data.getSize()-1, ~idx); idx.cp_to_host(); DEBUG_HANDLE_ERROR(hipStreamSynchronize(data.getStream())); return idx[0]; } } void runDiff2KernelCoarse( CudaProjectorKernel &projector, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT *corr_img, XFLOAT *Fimg_real, XFLOAT *Fimg_imag, XFLOAT *d_eulers, XFLOAT *diff2s, XFLOAT local_sqrtXi2, long unsigned orientation_num, int translation_num, int image_size, hipStream_t stream, bool do_CC, bool data_is_3D) { const int blocks3D = (data_is_3D? D2C_BLOCK_SIZE_DATA3D : D2C_BLOCK_SIZE_REF3D); if(!do_CC) { if(projector.mdlZ!=0) { #ifdef CUDA_DOUBLE_PRECISION if (translation_num > blocks3D*4) CRITICAL(ERR_TRANSLIM); #else if (translation_num > blocks3D*8) CRITICAL(ERR_TRANSLIM); #endif unsigned rest = orientation_num % blocks3D; long unsigned even_orientation_num = orientation_num - rest; if (translation_num <= blocks3D) { if (even_orientation_num != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D, D2C_EULERS_PER_BLOCK_DATA3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D),dim3(D2C_BLOCK_SIZE_DATA3D),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D, D2C_EULERS_PER_BLOCK_REF3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D),dim3(D2C_BLOCK_SIZE_REF3D),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_DATA3D),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_REF3D),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } else if (translation_num <= blocks3D*2) { if (even_orientation_num != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*2, D2C_EULERS_PER_BLOCK_DATA3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D),dim3(D2C_BLOCK_SIZE_DATA3D*2),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*2, D2C_EULERS_PER_BLOCK_REF3D, 4>) , 
dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D),dim3(D2C_BLOCK_SIZE_REF3D*2),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true, true, D2C_BLOCK_SIZE_DATA3D*2, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_DATA3D*2),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*2, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_REF3D*2),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } else if (translation_num <= blocks3D*4) { if (even_orientation_num != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*4, D2C_EULERS_PER_BLOCK_DATA3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D),dim3(D2C_BLOCK_SIZE_DATA3D*4),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*4, D2C_EULERS_PER_BLOCK_REF3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D),dim3(D2C_BLOCK_SIZE_REF3D*4),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*4, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_DATA3D*4),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*4, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_REF3D*4),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } #ifndef CUDA_DOUBLE_PRECISION else { if (even_orientation_num != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*8, D2C_EULERS_PER_BLOCK_DATA3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D),dim3(D2C_BLOCK_SIZE_DATA3D*8),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*8, D2C_EULERS_PER_BLOCK_REF3D, 4>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D),dim3(D2C_BLOCK_SIZE_REF3D*8),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*8, 1, 4>) , dim3(rest),dim3(D2C_BLOCK_SIZE_DATA3D*8),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*8, 1, 4>) , 
dim3(rest),dim3(D2C_BLOCK_SIZE_REF3D*8),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } #endif } else { if (translation_num > D2C_BLOCK_SIZE_2D) { printf("Number of coarse translations larger than %d on the GPU not supported.\n", D2C_BLOCK_SIZE_2D); fflush(stdout); exit(1); } unsigned rest = orientation_num % D2C_EULERS_PER_BLOCK_2D; long unsigned even_orientation_num = orientation_num - rest; if (even_orientation_num != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<false,true, D2C_BLOCK_SIZE_2D, D2C_EULERS_PER_BLOCK_2D, 2>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_2D),dim3(D2C_BLOCK_SIZE_2D),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<false,false, D2C_BLOCK_SIZE_2D, D2C_EULERS_PER_BLOCK_2D, 2>) , dim3(even_orientation_num/D2C_EULERS_PER_BLOCK_2D),dim3(D2C_BLOCK_SIZE_2D),0,stream, d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); LAUNCH_HANDLE_ERROR(hipGetLastError()); } if (rest != 0) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<false,true, D2C_BLOCK_SIZE_2D, 1, 2>) , dim3(rest),dim3(D2C_BLOCK_SIZE_2D),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else hipLaunchKernelGGL(( cuda_kernel_diff2_coarse<false,false, D2C_BLOCK_SIZE_2D, 1, 2>) , dim3(rest),dim3(D2C_BLOCK_SIZE_2D),0,stream, &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); LAUNCH_HANDLE_ERROR(hipGetLastError()); } } } else { dim3 CCblocks(orientation_num,translation_num); if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_CC_coarse<true,true,D2C_BLOCK_SIZE_DATA3D>) , dim3(CCblocks),dim3(D2C_BLOCK_SIZE_DATA3D),0,stream, d_eulers, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, translation_num, image_size, local_sqrtXi2); else if(projector.mdlZ!=0) hipLaunchKernelGGL(( cuda_kernel_diff2_CC_coarse<true,false,D2C_BLOCK_SIZE_REF3D>) , dim3(CCblocks),dim3(D2C_BLOCK_SIZE_REF3D),0,stream, d_eulers, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, translation_num, image_size, local_sqrtXi2); else hipLaunchKernelGGL(( cuda_kernel_diff2_CC_coarse<false,false,D2C_BLOCK_SIZE_2D>) , dim3(CCblocks),dim3(D2C_BLOCK_SIZE_2D),0,stream, d_eulers, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, translation_num, image_size, local_sqrtXi2); LAUNCH_HANDLE_ERROR(hipGetLastError()); } } void runDiff2KernelFine( CudaProjectorKernel &projector, XFLOAT *corr_img, XFLOAT *Fimgs_real, XFLOAT *Fimgs_imag, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT *eulers, long unsigned *rot_id, long unsigned *rot_idx, long unsigned *trans_idx, long unsigned *job_idx, long unsigned *job_num, XFLOAT *diff2s, OptimisationParamters &op, MlOptimiser *baseMLO, long unsigned orientation_num, long unsigned translation_num, long unsigned significant_num, unsigned image_size, int ipart, int exp_iclass, hipStream_t stream, long unsigned job_num_count, bool do_CC, bool data_is_3D) { dim3 block_dim = job_num_count; 
if(!do_CC) { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_fine<true,true, D2F_BLOCK_SIZE_DATA3D, D2F_CHUNK_DATA3D>) , dim3(block_dim),dim3(D2F_BLOCK_SIZE_DATA3D),0,stream, eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, // in these non-CC kernels this is effectively an adjusted MinvSigma2 diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else if(projector.mdlZ!=0) hipLaunchKernelGGL(( cuda_kernel_diff2_fine<true,false,D2F_BLOCK_SIZE_REF3D,D2F_CHUNK_REF3D>) , dim3(block_dim),dim3(D2F_BLOCK_SIZE_REF3D),0,stream, eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, // in these non-CC kernels this is effectively an adjusted MinvSigma2 diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else hipLaunchKernelGGL(( cuda_kernel_diff2_fine<false,false,D2F_BLOCK_SIZE_2D,D2F_CHUNK_2D>) , dim3(block_dim),dim3(D2F_BLOCK_SIZE_2D),0,stream, eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, // in these non-CC kernels this is effectively an adjusted MinvSigma2 diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); LAUNCH_HANDLE_ERROR(hipGetLastError()); } else { if(data_is_3D) hipLaunchKernelGGL(( cuda_kernel_diff2_CC_fine<true,true,D2F_BLOCK_SIZE_DATA3D,D2F_CHUNK_DATA3D>) , dim3(block_dim),dim3(D2F_BLOCK_SIZE_DATA3D),0,stream, eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., (XFLOAT) op.local_sqrtXi2[ipart], orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else if(projector.mdlZ!=0) hipLaunchKernelGGL(( cuda_kernel_diff2_CC_fine<true,false,D2F_BLOCK_SIZE_REF3D,D2F_CHUNK_REF3D>) , dim3(block_dim),dim3(D2F_BLOCK_SIZE_REF3D),0,stream, eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., (XFLOAT) op.local_sqrtXi2[ipart], orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else hipLaunchKernelGGL(( cuda_kernel_diff2_CC_fine<false,false,D2F_BLOCK_SIZE_2D,D2F_CHUNK_2D>) , dim3(block_dim),dim3(D2F_BLOCK_SIZE_2D),0,stream, eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., (XFLOAT) op.local_sqrtXi2[ipart], orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); LAUNCH_HANDLE_ERROR(hipGetLastError()); } } void runCollect2jobs( dim3 grid_dim, XFLOAT * oo_otrans_x, // otrans-size -> make const XFLOAT * oo_otrans_y, // otrans-size -> make const XFLOAT * oo_otrans_z, // otrans-size -> make const XFLOAT * myp_oo_otrans_x2y2z2, // otrans-size -> make const XFLOAT * weights, XFLOAT significant_weight, XFLOAT sum_weight, unsigned long nr_trans, unsigned long nr_oversampled_trans, unsigned long nr_oversampled_rot, int oversamples, bool skip_rots, XFLOAT * p_weights, XFLOAT * p_thr_wsum_prior_offsetx_class, XFLOAT * p_thr_wsum_prior_offsety_class, XFLOAT * p_thr_wsum_prior_offsetz_class, XFLOAT * p_thr_wsum_sigma2_offset, size_t * rot_idx, size_t * trans_idx, size_t * jobOrigin, size_t * jobExtent, bool 
data_is_3D ) { if(data_is_3D) { size_t shared_buffer = sizeof(XFLOAT)*SUMW_BLOCK_SIZE*5; // x+y+z+myp+weights hipLaunchKernelGGL(( cuda_kernel_collect2jobs<true>), dim3(grid_dim),dim3(SUMW_BLOCK_SIZE),shared_buffer, 0, oo_otrans_x, // otrans-size -> make const oo_otrans_y, // otrans-size -> make const oo_otrans_z, // otrans-size -> make const myp_oo_otrans_x2y2z2, // otrans-size -> make const weights, significant_weight, sum_weight, nr_trans, nr_oversampled_trans, nr_oversampled_rot, oversamples, skip_rots, p_weights, p_thr_wsum_prior_offsetx_class, p_thr_wsum_prior_offsety_class, p_thr_wsum_prior_offsetz_class, p_thr_wsum_sigma2_offset, rot_idx, trans_idx, jobOrigin, jobExtent); } else { size_t shared_buffer = sizeof(XFLOAT)*SUMW_BLOCK_SIZE*4; // x+y+myp+weights hipLaunchKernelGGL(( cuda_kernel_collect2jobs<false>), dim3(grid_dim),dim3(SUMW_BLOCK_SIZE),shared_buffer, 0, oo_otrans_x, // otrans-size -> make const oo_otrans_y, // otrans-size -> make const oo_otrans_z, // otrans-size -> make const myp_oo_otrans_x2y2z2, // otrans-size -> make const weights, significant_weight, sum_weight, nr_trans, nr_oversampled_trans, nr_oversampled_rot, oversamples, skip_rots, p_weights, p_thr_wsum_prior_offsetx_class, p_thr_wsum_prior_offsety_class, p_thr_wsum_prior_offsetz_class, p_thr_wsum_sigma2_offset, rot_idx, trans_idx, jobOrigin, jobExtent); } } //void windowFourierTransform2( // XFLOAT *d_in_real, // XFLOAT *d_in_imag, // XFLOAT *d_out_real, // XFLOAT *d_out_imag, // unsigned iX, unsigned iY, unsigned iZ, //Input dimensions // unsigned oX, unsigned oY, unsigned oZ, //Output dimensions // hipStream_t stream // ) //{ // if (iX > 1 && iY/2 + 1 != iX) // REPORT_ERROR("windowFourierTransform ERROR: the Fourier transform should be of an image with equal sizes in all dimensions!"); // // if (oY == iX) // REPORT_ERROR("windowFourierTransform ERROR: there is a one-to-one map between input and output!"); // // cudaMemInit<XFLOAT>( d_out_real, 0, (size_t) oX*oY*oZ, stream ); // cudaMemInit<XFLOAT>( d_out_imag, 0, (size_t) oX*oY*oZ, stream ); // // if (oY > iX) // { // long int max_r2 = (iX - 1) * (iX - 1); // // unsigned grid_dim = ceil((float)(iX*iY*iZ) / (float) WINDOW_FT_BLOCK_SIZE); // cuda_kernel_window_fourier_transform<true><<< grid_dim, WINDOW_FT_BLOCK_SIZE, 0, stream >>>( // d_in_real, // d_in_imag, // d_out_real, // d_out_imag, // iX, iY, iZ, iX * iY, //Input dimensions // oX, oY, oZ, oX * oY, //Output dimensions // iX*iY*iZ, // max_r2 ); // } // else // { // unsigned grid_dim = ceil((float)(oX*oY*oZ) / (float) WINDOW_FT_BLOCK_SIZE); // cuda_kernel_window_fourier_transform<false><<< grid_dim, WINDOW_FT_BLOCK_SIZE, 0, stream >>>( // d_in_real, // d_in_imag, // d_out_real, // d_out_imag, // iX, iY, iZ, iX * iY, //Input dimensions // oX, oY, oZ, oX * oY, //Output dimensions // oX*oY*oZ); // } //} void windowFourierTransform2( CudaGlobalPtr<CUDACOMPLEX > &d_in, CudaGlobalPtr<CUDACOMPLEX > &d_out, size_t iX, size_t iY, size_t iZ, //Input dimensions size_t oX, size_t oY, size_t oZ, //Output dimensions size_t Npsi, size_t pos, hipStream_t stream) { if (iX > 1 && iY/2 + 1 != iX) REPORT_ERROR("windowFourierTransform ERROR: the Fourier transform should be of an image with equal sizes in all dimensions!"); // if (oX == iX) // REPORT_ERROR("windowFourierTransform ERROR: there is a one-to-one map between input and output!"); deviceInitComplexValue(d_out, (XFLOAT)0.); HANDLE_ERROR(hipStreamSynchronize(d_out.getStream())); if(oX==iX) { HANDLE_ERROR(hipStreamSynchronize(d_in.getStream())); 
cudaCpyDeviceToDevice(&d_in.d_ptr[pos], ~d_out, oX*oY*oZ*Npsi, d_out.getStream() ); return; } if (oX > iX) { long int max_r2 = (iX - 1) * (iX - 1); dim3 grid_dim(ceil((float)(iX*iY*iZ) / (float) WINDOW_FT_BLOCK_SIZE),Npsi); hipLaunchKernelGGL(( cuda_kernel_window_fourier_transform<true>), dim3(grid_dim), dim3(WINDOW_FT_BLOCK_SIZE), 0, d_out.getStream() , &d_in.d_ptr[pos], d_out.d_ptr, iX, iY, iZ, iX * iY, //Input dimensions oX, oY, oZ, oX * oY, //Output dimensions iX*iY*iZ, max_r2 ); LAUNCH_HANDLE_ERROR(hipGetLastError()); } else { dim3 grid_dim(ceil((float)(oX*oY*oZ) / (float) WINDOW_FT_BLOCK_SIZE),Npsi); hipLaunchKernelGGL(( cuda_kernel_window_fourier_transform<false>), dim3(grid_dim), dim3(WINDOW_FT_BLOCK_SIZE), 0, d_out.getStream() , &d_in.d_ptr[pos], d_out.d_ptr, iX, iY, iZ, iX * iY, //Input dimensions oX, oY, oZ, oX * oY, //Output dimensions oX*oY*oZ); LAUNCH_HANDLE_ERROR(hipGetLastError()); } } void selfApplyBeamTilt2(MultidimArray<Complex > &Fimg, RFLOAT beamtilt_x, RFLOAT beamtilt_y, RFLOAT wavelength, RFLOAT Cs, RFLOAT angpix, int ori_size) { if (Fimg.getDim() != 2) REPORT_ERROR("applyBeamTilt can only be done on 2D Fourier Transforms!"); RFLOAT boxsize = angpix * ori_size; RFLOAT factor = 0.360 * Cs * 10000000 * wavelength * wavelength / (boxsize * boxsize * boxsize); for (unsigned n = 0 ; n < Fimg.yxdim; n ++) { unsigned i = n / Fimg.xdim; unsigned j = n % Fimg.xdim; unsigned jp = j; int ip = i < Fimg.xdim ? i : i - Fimg.ydim; RFLOAT delta_phase = factor * (ip * ip + jp * jp) * (ip * beamtilt_y + jp * beamtilt_x); RFLOAT realval = Fimg.data[i*Fimg.xdim+j].real; RFLOAT imagval = Fimg.data[i*Fimg.xdim+j].imag; RFLOAT mag = sqrt(realval * realval + imagval * imagval); RFLOAT phas = atan2(imagval, realval) + DEG2RAD(delta_phase); // apply phase shift! realval = mag * cos(phas); imagval = mag * sin(phas); Fimg.data[i*Fimg.xdim+j] = Complex(realval, imagval); } }
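
makeJobsForCollect above performs a simple run-length grouping: consecutive weight entries that share the same rot_idx are merged into one job described by a (jobOrigin, jobExtent) pair. Below is a minimal host-side sketch of that grouping; it assumes plain std::vector containers in place of the IndexedDataArray and IndexedDataArrayMask types, and the JobList struct and groupByOrientation name are hypothetical stand-ins.

#include <cstddef>
#include <vector>

// Hypothetical container: origin[i] is the first weight index of job i,
// extent[i] is the number of contiguous weights belonging to job i.
struct JobList {
    std::vector<size_t> origin;
    std::vector<size_t> extent;
};

JobList groupByOrientation(const std::vector<unsigned long> &rot_idx)
{
    JobList jobs;
    if (rot_idx.empty()) return jobs;

    jobs.origin.push_back(0);
    jobs.extent.push_back(1);
    unsigned long crot = rot_idx[0];          // current orientation

    for (size_t n = 1; n < rot_idx.size(); ++n) {
        if (rot_idx[n] == crot) {
            ++jobs.extent.back();             // extend the current job
        } else {
            jobs.origin.push_back(n);         // start a new job at this weight
            jobs.extent.push_back(1);
            crot = rot_idx[n];
        }
    }
    return jobs;                              // jobs.origin.size() == number of jobs
}
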
19709014c543e72b0fd87d79fa3272b201501f85.cu
#include <cuda_runtime.h> #include "src/gpu_utils/cuda_settings.h" #include "src/gpu_utils/cuda_helper_functions.cuh" #include "src/gpu_utils/cuda_kernels/BP.cuh" #include "src/macros.h" #include "src/error.h" long int makeJobsForDiff2Fine( OptimisationParamters &op, SamplingParameters &sp, long int orientation_num, long int translation_num, ProjectionParams &FineProjectionData, std::vector< long unsigned > &iover_transes, std::vector< long unsigned > &ihiddens, long int nr_over_orient, long int nr_over_trans, int ipart, IndexedDataArray &FPW, // FPW=FinePassWeights IndexedDataArrayMask &dataMask, int chunk) { long int w_base = dataMask.firstPos, w(0), k(0); // be on the safe side with the jobArrays: make them as large as they could possibly be // (this will be reduced at exit of this function) dataMask.setNumberOfJobs(orientation_num*translation_num); dataMask.setNumberOfWeights(orientation_num*translation_num); dataMask.jobOrigin.host_alloc(); dataMask.jobExtent.host_alloc(); dataMask.jobOrigin[k]=0; for (long unsigned i = 0; i < orientation_num; i++) { dataMask.jobExtent[k]=0; int tk=0; long int iover_rot = FineProjectionData.iover_rots[i]; for (long unsigned j = 0; j < translation_num; j++) { long int iover_trans = iover_transes[j]; long int ihidden = FineProjectionData.iorientclasses[i] * sp.nr_trans + ihiddens[j]; if(DIRECT_A2D_ELEM(op.Mcoarse_significant, ipart, ihidden)==1) { FPW.rot_id[w_base+w] = FineProjectionData.iorientclasses[i] % (sp.nr_dir*sp.nr_psi); // where to look for priors etc FPW.rot_idx[w_base+w] = i; // which rot for this significant task FPW.trans_idx[w_base+w] = j; // which trans - || - FPW.ihidden_overs[w_base+w]= (ihidden * nr_over_orient + iover_rot) * nr_over_trans + iover_trans; if(tk>=chunk) { tk=0; // reset counter k++; // use new element dataMask.jobOrigin[k]=w; dataMask.jobExtent[k]=0; // prepare next element for ++ incrementing } tk++; // increment limit-checker dataMask.jobExtent[k]++; // increment number of transes this job w++; } else if(tk!=0) // start a new one with the same rotidx - we expect transes to be sequential. 
{ tk=0; // reset counter k++; // use new element dataMask.jobOrigin[k]=w; dataMask.jobExtent[k]=0; // prepare next element for ++ incrementing } } if(tk>0) // use new element (if tk==0) then we are currently on an element with no signif, so we should continue using this element { k++; dataMask.jobOrigin[k]=w; dataMask.jobExtent[k]=0; } } if(dataMask.jobExtent[k]!=0) // if we started putting something in the last element, then the count is one higher than the index k+=1; dataMask.setNumberOfJobs(k); dataMask.setNumberOfWeights(w); // if(dataMask.weightNum>0) // { // dataMask.jobOrigin.device_alloc(); // dataMask.jobExtent.device_alloc(); // } return(w); } int makeJobsForCollect(IndexedDataArray &FPW, IndexedDataArrayMask &dataMask, unsigned long NewJobNum) // FPW=FinePassWeights { // reset the old (diff2Fine) job-definitions // dataMask.jobOrigin.free_host(); // dataMask.jobOrigin.free_device(); // dataMask.jobExtent.free_host(); // dataMask.jobExtent.free_device(); dataMask.setNumberOfJobs(NewJobNum); // dataMask.jobOrigin.host_alloc(); // dataMask.jobExtent.host_alloc(); long int jobid=0; dataMask.jobOrigin[jobid]=0; dataMask.jobExtent[jobid]=1; long int crot =FPW.rot_idx[jobid]; // set current rot for(long int n=1; n<FPW.rot_idx.size; n++) { if(FPW.rot_idx[n]==crot) { dataMask.jobExtent[jobid]++; } else { jobid++; dataMask.jobExtent[jobid]=1; dataMask.jobOrigin[jobid]=n; crot=FPW.rot_idx[n]; } } dataMask.setNumberOfJobs(jobid+1); // because max index is one less than size // dataMask.jobOrigin.put_on_device(); // dataMask.jobExtent.put_on_device(); return (jobid+1); } /* * Maps weights to a decoupled indexing of translations and orientations */ void mapWeights( unsigned long orientation_start, XFLOAT *mapped_weights, unsigned orientation_num, unsigned long idxArr_start, unsigned long idxArr_end, unsigned translation_num, XFLOAT *weights, long unsigned *rot_idx, long unsigned *trans_idx, unsigned long current_oversampling) { for (long unsigned i = 0; i < orientation_num*translation_num; i++) mapped_weights[i] = -999.; for (long unsigned i = idxArr_start; i < idxArr_end; i++) mapped_weights[ (rot_idx[i]-orientation_start) * translation_num + trans_idx[i] ]= weights[i]; } void buildCorrImage(MlOptimiser *baseMLO, OptimisationParamters &op, CudaGlobalPtr<XFLOAT> &corr_img, long int ipart, long int group_id) { // CC or not if((baseMLO->iter == 1 && baseMLO->do_firstiter_cc) || baseMLO->do_always_cc) for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] = 1. / (op.local_sqrtXi2[ipart]*op.local_sqrtXi2[ipart]); else for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] = *(op.local_Minvsigma2s[ipart].data + i ); // ctf-correction or not ( NOTE this is not where the difference metric is ctf-corrected, but // rather where we apply the additional correction to make the GPU-specific arithmetic equal // to the CPU method) if (baseMLO->do_ctf_correction && baseMLO->refs_are_ctf_corrected) for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] *= DIRECT_MULTIDIM_ELEM(op.local_Fctfs[ipart], i)*DIRECT_MULTIDIM_ELEM(op.local_Fctfs[ipart], i); // scale-correction or not ( NOTE this is not where the difference metric is scale-corrected, but // rather where we apply the additional correction to make the GPU-specific arithmetic equal // to the CPU method) XFLOAT myscale = baseMLO->mymodel.scale_correction[group_id]; if (baseMLO->do_scale_correction) for(int i = 0; i < corr_img.getSize(); i++) corr_img[i] *= myscale * myscale; } void generateEulerMatrices( XFLOAT padding_factor, ProjectionParams &ProjectionData, XFLOAT *eulers, bool inverse) { RFLOAT alpha, beta, gamma; RFLOAT ca, sa, cb, sb, cg, sg; RFLOAT cc, cs, sc, ss; for (long int i = 0; i < ProjectionData.rots.size(); i++) { //TODO In a sense we're doing RAD2DEG just to do DEG2RAD here. //The only place the degree value is actually used is in the metadata assignment. alpha = DEG2RAD(ProjectionData.rots[i]); beta = DEG2RAD(ProjectionData.tilts[i]); gamma = DEG2RAD(ProjectionData.psis[i]); sincos(alpha, &sa, &ca); sincos(beta, &sb, &cb); sincos(gamma, &sg, &cg); cc = cb * ca; cs = cb * sa; sc = sb * ca; ss = sb * sa; if(inverse) { eulers[9 * i + 0] = ( cg * cc - sg * sa) ;// * padding_factor; //00 eulers[9 * i + 1] = (-sg * cc - cg * sa) ;// * padding_factor; //10 eulers[9 * i + 2] = ( sc ) ;// * padding_factor; //20 eulers[9 * i + 3] = ( cg * cs + sg * ca) ;// * padding_factor; //01 eulers[9 * i + 4] = (-sg * cs + cg * ca) ;// * padding_factor; //11 eulers[9 * i + 5] = ( ss ) ;// * padding_factor; //21 eulers[9 * i + 6] = (-cg * sb ) ;// * padding_factor; //02 eulers[9 * i + 7] = ( sg * sb ) ;// * padding_factor; //12 eulers[9 * i + 8] = ( cb ) ;// * padding_factor; //22 } else { eulers[9 * i + 0] = ( cg * cc - sg * sa) ;// * padding_factor; //00 eulers[9 * i + 1] = ( cg * cs + sg * ca) ;// * padding_factor; //01 eulers[9 * i + 2] = (-cg * sb ) ;// * padding_factor; //02 eulers[9 * i + 3] = (-sg * cc - cg * sa) ;// * padding_factor; //10 eulers[9 * i + 4] = (-sg * cs + cg * ca) ;// * padding_factor; //11 eulers[9 * i + 5] = ( sg * sb ) ;// * padding_factor; //12 eulers[9 * i + 6] = ( sc ) ;// * padding_factor; //20 eulers[9 * i + 7] = ( ss ) ;// * padding_factor; //21 eulers[9 * i + 8] = ( cb ) ;// * padding_factor; //22 } } } long unsigned generateProjectionSetupFine( OptimisationParamters &op, SamplingParameters &sp, MlOptimiser *baseMLO, unsigned iclass, ProjectionParams &ProjectionData) // FIXME : For coarse iteration this is **SLOW** HERE ARE SOME NOTES FOR PARALLELIZING IT (GPU OFFLOAD): /* * Since it is based on push_back, parallelizing should be fine given some atomic operation appends, * what takes time is looping through all this. The job-splitting in collect2jobs-preprocessing and * divideOrientationsIntoBlockjobs() relies on chunks of shared orientations being adjacent in * ProjectionData.rot_id (and thus also .rot_idx), but does not care which order those chunks appear * in. So as long as the parallelism and "atomic push_back" are organised to use an orientation as a * minimum unit, the job-splitting should be fine with the output. */ { //Local variables std::vector< RFLOAT > oversampled_rot, oversampled_tilt, oversampled_psi; long int orientation_num = 0; for (long int idir = sp.idir_min, iorient = 0; idir <= sp.idir_max; idir++) { for (long int ipsi = sp.ipsi_min, ipart = 0; ipsi <= sp.ipsi_max; ipsi++, iorient++) { long int iorientclass = iclass * sp.nr_dir * sp.nr_psi + iorient; if (baseMLO->isSignificantAnyParticleAnyTranslation(iorientclass, sp.itrans_min, sp.itrans_max, op.Mcoarse_significant)) { // Now get the oversampled (rot, tilt, psi) triplets // This will be only the original (rot,tilt,psi) triplet in the first pass (sp.current_oversampling==0) baseMLO->sampling.getOrientations(idir, ipsi, sp.current_oversampling, oversampled_rot, oversampled_tilt, oversampled_psi, op.pointer_dir_nonzeroprior, op.directions_prior, op.pointer_psi_nonzeroprior, op.psi_prior); // Loop over all oversampled orientations (only a single one in the first pass) for (long int iover_rot = 0; iover_rot < sp.nr_oversampled_rot; iover_rot++, ipart++) { ProjectionData.pushBackAll( (long unsigned)iclass, oversampled_rot[iover_rot], oversampled_tilt[iover_rot], oversampled_psi[iover_rot], iorientclass, iover_rot ); orientation_num ++; } } } } ProjectionData.orientation_num[iclass]=orientation_num; return orientation_num; } void runWavgKernel( CudaProjectorKernel &projector, XFLOAT *eulers, XFLOAT *Fimg_real, XFLOAT *Fimg_imag, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT *sorted_weights, XFLOAT *ctfs, XFLOAT *wdiff2s_parts, XFLOAT *wdiff2s_AA, XFLOAT *wdiff2s_XA, OptimisationParamters &op, long unsigned orientation_num, long unsigned translation_num, unsigned image_size, long int ipart, int group_id, int exp_iclass, XFLOAT part_scale, bool refs_are_ctf_corrected, bool data_is_3D, cudaStream_t stream) { //We only want as many blocks as there are chunks of orientations to be treated //within the same block (this is done to reduce memory loads in the kernel).
dim3 block_dim = orientation_num;//ceil((float)orientation_num/(float)REF_GROUP_SIZE); //cudaFuncSetCacheConfig(cuda_kernel_wavg_fast, cudaFuncCachePreferShared); if (refs_are_ctf_corrected) { if(data_is_3D) cuda_kernel_wavg<true,true,true,WAVG_BLOCK_SIZE_DATA3D><<<block_dim,WAVG_BLOCK_SIZE_DATA3D,(3*WAVG_BLOCK_SIZE_DATA3D+9)*sizeof(XFLOAT),stream>>>( eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else if (projector.mdlZ!=0) cuda_kernel_wavg<true,true,false,WAVG_BLOCK_SIZE><<<block_dim,WAVG_BLOCK_SIZE,(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream>>>( eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else cuda_kernel_wavg<true,false,false,WAVG_BLOCK_SIZE><<<block_dim,WAVG_BLOCK_SIZE,(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream>>>( eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); } else { if(data_is_3D) cuda_kernel_wavg<false,true,true,WAVG_BLOCK_SIZE_DATA3D><<<block_dim,WAVG_BLOCK_SIZE_DATA3D,(3*WAVG_BLOCK_SIZE_DATA3D+9)*sizeof(XFLOAT),stream>>>( eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else if (projector.mdlZ!=0) cuda_kernel_wavg<false,true,false,WAVG_BLOCK_SIZE><<<block_dim,WAVG_BLOCK_SIZE,(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream>>>( eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); else cuda_kernel_wavg<false,false,false,WAVG_BLOCK_SIZE><<<block_dim,WAVG_BLOCK_SIZE,(3*WAVG_BLOCK_SIZE+9)*sizeof(XFLOAT),stream>>>( eulers, projector, image_size, orientation_num, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, sorted_weights, ctfs, wdiff2s_parts, wdiff2s_AA, wdiff2s_XA, translation_num, (XFLOAT) op.sum_weight[ipart], (XFLOAT) op.significant_weight[ipart], part_scale ); } LAUNCH_HANDLE_ERROR(cudaGetLastError()); } void runBackProjectKernel( CudaBackprojector &BP, CudaProjectorKernel &projector, XFLOAT *d_img_real, XFLOAT *d_img_imag, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT* d_weights, XFLOAT* d_Minvsigma2s, XFLOAT* d_ctfs, unsigned long translation_num, XFLOAT significant_weight, XFLOAT weight_norm, XFLOAT *d_eulers, int imgX, int imgY, int imgZ, unsigned long imageCount, bool data_is_3D, bool do_sgd, cudaStream_t optStream) { if(BP.mdlZ==1) { cuda_kernel_backproject2D<<<imageCount,BP_2D_BLOCK_SIZE,0,optStream>>>( d_img_real, d_img_imag, trans_x, trans_y, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgX*imgY, BP.mdlX, BP.mdlInitY); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } else { if(do_sgd) { 
if(data_is_3D) cuda_kernel_backprojectSGD<true><<<imageCount,BP_DATA3D_BLOCK_SIZE,0,optStream>>>( projector, d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); else cuda_kernel_backprojectSGD<false><<<imageCount,BP_REF3D_BLOCK_SIZE,0,optStream>>>( projector, d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); } else { if(data_is_3D) cuda_kernel_backproject3D<true><<<imageCount,BP_DATA3D_BLOCK_SIZE,0,optStream>>>( d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); else cuda_kernel_backproject3D<false><<<imageCount,BP_REF3D_BLOCK_SIZE,0,optStream>>>( d_img_real, d_img_imag, trans_x, trans_y, trans_z, d_weights, d_Minvsigma2s, d_ctfs, translation_num, significant_weight, weight_norm, d_eulers, BP.d_mdlReal, BP.d_mdlImag, BP.d_mdlWeight, BP.maxR, BP.maxR2, BP.padding_factor, imgX, imgY, imgZ, imgX*imgY*imgZ, BP.mdlX, BP.mdlY, BP.mdlInitY, BP.mdlInitZ); } LAUNCH_HANDLE_ERROR(cudaGetLastError()); } } __global__ void cuda_kernel_allweights_to_mweights( unsigned long * d_iorient, XFLOAT * d_allweights, XFLOAT * d_mweights, unsigned long orientation_num, unsigned long translation_num ) { size_t idx = blockIdx.x * WEIGHT_MAP_BLOCK_SIZE + threadIdx.x; if (idx < orientation_num*translation_num) d_mweights[d_iorient[idx/translation_num] * translation_num + idx%translation_num] = d_allweights[idx]; //d_mweights[idx/translation_num] = //d_allweights[idx/translation_num * translation_num + idx%translation_num]; } void mapAllWeightsToMweights( unsigned long * d_iorient, //projectorPlan.iorientclasses XFLOAT * d_allweights, //allWeights XFLOAT * d_mweights, //Mweight unsigned long orientation_num, //projectorPlan.orientation_num unsigned long translation_num, //translation_num cudaStream_t stream ) { int grid_size = ceil((float)(orientation_num*translation_num)/(float)WEIGHT_MAP_BLOCK_SIZE); cuda_kernel_allweights_to_mweights<<< grid_size, WEIGHT_MAP_BLOCK_SIZE, 0, stream >>>( d_iorient, d_allweights, d_mweights, orientation_num, translation_num); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } void compareWeights( unsigned long * d_iorient, //projectorPlan.iorientclasses XFLOAT * d_allweights, //allWeights XFLOAT * d_mweights, //Mweight unsigned long orientation_num, //projectorPlan.orientation_num unsigned long translation_num, //translation_num cudaStream_t stream ) { int grid_size = ceil((float)(orientation_num*translation_num)/(float)WEIGHT_MAP_BLOCK_SIZE); cuda_kernel_allweights_to_mweights<<< grid_size, WEIGHT_MAP_BLOCK_SIZE, 0, stream >>>( d_iorient, d_allweights, d_mweights, orientation_num, translation_num); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } size_t findThresholdIdxInCumulativeSum(CudaGlobalPtr<XFLOAT> &data, XFLOAT threshold) { int grid_size = ceil((float)(data.getSize()-1)/(float)FIND_IN_CUMULATIVE_BLOCK_SIZE); if(grid_size==0) { return(0); } 
else { CudaGlobalPtr<size_t > idx(1, data.getStream(), data.getAllocator()); idx[0] = 0; idx.put_on_device(); cuda_kernel_find_threshold_idx_in_cumulative<<< grid_size, FIND_IN_CUMULATIVE_BLOCK_SIZE, 0, data.getStream() >>>( ~data, threshold, data.getSize()-1, ~idx); idx.cp_to_host(); DEBUG_HANDLE_ERROR(cudaStreamSynchronize(data.getStream())); return idx[0]; } } void runDiff2KernelCoarse( CudaProjectorKernel &projector, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT *corr_img, XFLOAT *Fimg_real, XFLOAT *Fimg_imag, XFLOAT *d_eulers, XFLOAT *diff2s, XFLOAT local_sqrtXi2, long unsigned orientation_num, int translation_num, int image_size, cudaStream_t stream, bool do_CC, bool data_is_3D) { const int blocks3D = (data_is_3D? D2C_BLOCK_SIZE_DATA3D : D2C_BLOCK_SIZE_REF3D); if(!do_CC) { if(projector.mdlZ!=0) { #ifdef CUDA_DOUBLE_PRECISION if (translation_num > blocks3D*4) CRITICAL(ERR_TRANSLIM); #else if (translation_num > blocks3D*8) CRITICAL(ERR_TRANSLIM); #endif unsigned rest = orientation_num % blocks3D; long unsigned even_orientation_num = orientation_num - rest; if (translation_num <= blocks3D) { if (even_orientation_num != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D, D2C_EULERS_PER_BLOCK_DATA3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D,D2C_BLOCK_SIZE_DATA3D,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D, D2C_EULERS_PER_BLOCK_REF3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D,D2C_BLOCK_SIZE_REF3D,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D, 1, 4> <<<rest,D2C_BLOCK_SIZE_DATA3D,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D, 1, 4> <<<rest,D2C_BLOCK_SIZE_REF3D,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } else if (translation_num <= blocks3D*2) { if (even_orientation_num != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*2, D2C_EULERS_PER_BLOCK_DATA3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D,D2C_BLOCK_SIZE_DATA3D*2,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*2, D2C_EULERS_PER_BLOCK_REF3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D,D2C_BLOCK_SIZE_REF3D*2,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true, true, D2C_BLOCK_SIZE_DATA3D*2, 1, 4> <<<rest,D2C_BLOCK_SIZE_DATA3D*2,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*2, 1, 4> <<<rest,D2C_BLOCK_SIZE_REF3D*2,0,stream>>>( 
&d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } else if (translation_num <= blocks3D*4) { if (even_orientation_num != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*4, D2C_EULERS_PER_BLOCK_DATA3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D,D2C_BLOCK_SIZE_DATA3D*4,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*4, D2C_EULERS_PER_BLOCK_REF3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D,D2C_BLOCK_SIZE_REF3D*4,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*4, 1, 4> <<<rest,D2C_BLOCK_SIZE_DATA3D*4,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*4, 1, 4> <<<rest,D2C_BLOCK_SIZE_REF3D*4,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } #ifndef CUDA_DOUBLE_PRECISION else { if (even_orientation_num != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*8, D2C_EULERS_PER_BLOCK_DATA3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_DATA3D,D2C_BLOCK_SIZE_DATA3D*8,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*8, D2C_EULERS_PER_BLOCK_REF3D, 4> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_REF3D,D2C_BLOCK_SIZE_REF3D*8,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); } if (rest != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<true,true, D2C_BLOCK_SIZE_DATA3D*8, 1, 4> <<<rest,D2C_BLOCK_SIZE_DATA3D*8,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else cuda_kernel_diff2_coarse<true,false, D2C_BLOCK_SIZE_REF3D*8, 1, 4> <<<rest,D2C_BLOCK_SIZE_REF3D*8,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); } } #endif } else { if (translation_num > D2C_BLOCK_SIZE_2D) { printf("Number of coarse translations larger than %d on the GPU not supported.\n", D2C_BLOCK_SIZE_2D); fflush(stdout); exit(1); } unsigned rest = orientation_num % D2C_EULERS_PER_BLOCK_2D; long unsigned even_orientation_num = orientation_num - rest; if (even_orientation_num != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<false,true, D2C_BLOCK_SIZE_2D, D2C_EULERS_PER_BLOCK_2D, 2> <<<even_orientation_num/D2C_EULERS_PER_BLOCK_2D,D2C_BLOCK_SIZE_2D,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); else cuda_kernel_diff2_coarse<false,false, D2C_BLOCK_SIZE_2D, D2C_EULERS_PER_BLOCK_2D, 2> 
<<<even_orientation_num/D2C_EULERS_PER_BLOCK_2D,D2C_BLOCK_SIZE_2D,0,stream>>>( d_eulers, trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, diff2s, translation_num, image_size); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } if (rest != 0) { if(data_is_3D) cuda_kernel_diff2_coarse<false,true, D2C_BLOCK_SIZE_2D, 1, 2> <<<rest,D2C_BLOCK_SIZE_2D,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); else cuda_kernel_diff2_coarse<false,false, D2C_BLOCK_SIZE_2D, 1, 2> <<<rest,D2C_BLOCK_SIZE_2D,0,stream>>>( &d_eulers[9*even_orientation_num], trans_x, trans_y, trans_z, Fimg_real, Fimg_imag, projector, corr_img, &diff2s[translation_num*even_orientation_num], translation_num, image_size); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } } } else { dim3 CCblocks(orientation_num,translation_num); if(data_is_3D) cuda_kernel_diff2_CC_coarse<true,true,D2C_BLOCK_SIZE_DATA3D> <<<CCblocks,D2C_BLOCK_SIZE_DATA3D,0,stream>>>( d_eulers, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, translation_num, image_size, local_sqrtXi2); else if(projector.mdlZ!=0) cuda_kernel_diff2_CC_coarse<true,false,D2C_BLOCK_SIZE_REF3D> <<<CCblocks,D2C_BLOCK_SIZE_REF3D,0,stream>>>( d_eulers, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, translation_num, image_size, local_sqrtXi2); else cuda_kernel_diff2_CC_coarse<false,false,D2C_BLOCK_SIZE_2D> <<<CCblocks,D2C_BLOCK_SIZE_2D,0,stream>>>( d_eulers, Fimg_real, Fimg_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, translation_num, image_size, local_sqrtXi2); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } } void runDiff2KernelFine( CudaProjectorKernel &projector, XFLOAT *corr_img, XFLOAT *Fimgs_real, XFLOAT *Fimgs_imag, XFLOAT *trans_x, XFLOAT *trans_y, XFLOAT *trans_z, XFLOAT *eulers, long unsigned *rot_id, long unsigned *rot_idx, long unsigned *trans_idx, long unsigned *job_idx, long unsigned *job_num, XFLOAT *diff2s, OptimisationParamters &op, MlOptimiser *baseMLO, long unsigned orientation_num, long unsigned translation_num, long unsigned significant_num, unsigned image_size, int ipart, int exp_iclass, cudaStream_t stream, long unsigned job_num_count, bool do_CC, bool data_is_3D) { dim3 block_dim = job_num_count; if(!do_CC) { if(data_is_3D) cuda_kernel_diff2_fine<true,true, D2F_BLOCK_SIZE_DATA3D, D2F_CHUNK_DATA3D> <<<block_dim,D2F_BLOCK_SIZE_DATA3D,0,stream>>>( eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, // in these non-CC kernels this is effectively an adjusted MinvSigma2 diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else if(projector.mdlZ!=0) cuda_kernel_diff2_fine<true,false,D2F_BLOCK_SIZE_REF3D,D2F_CHUNK_REF3D> <<<block_dim,D2F_BLOCK_SIZE_REF3D,0,stream>>>( eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, // in these non-CC kernels this is effectively an adjusted MinvSigma2 diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else cuda_kernel_diff2_fine<false,false,D2F_BLOCK_SIZE_2D,D2F_CHUNK_2D> <<<block_dim,D2F_BLOCK_SIZE_2D,0,stream>>>( eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, // in these non-CC kernels this is effectively an adjusted 
MinvSigma2 diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } else { if(data_is_3D) cuda_kernel_diff2_CC_fine<true,true,D2F_BLOCK_SIZE_DATA3D,D2F_CHUNK_DATA3D> <<<block_dim,D2F_BLOCK_SIZE_DATA3D,0,stream>>>( eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., (XFLOAT) op.local_sqrtXi2[ipart], orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else if(projector.mdlZ!=0) cuda_kernel_diff2_CC_fine<true,false,D2F_BLOCK_SIZE_REF3D,D2F_CHUNK_REF3D> <<<block_dim,D2F_BLOCK_SIZE_REF3D,0,stream>>>( eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., (XFLOAT) op.local_sqrtXi2[ipart], orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); else cuda_kernel_diff2_CC_fine<false,false,D2F_BLOCK_SIZE_2D,D2F_CHUNK_2D> <<<block_dim,D2F_BLOCK_SIZE_2D,0,stream>>>( eulers, Fimgs_real, Fimgs_imag, trans_x, trans_y, trans_z, projector, corr_img, diff2s, image_size, op.highres_Xi2_imgs[ipart] / 2., (XFLOAT) op.local_sqrtXi2[ipart], orientation_num, translation_num, job_num_count, //significant_num, rot_idx, trans_idx, job_idx, job_num); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } } void runCollect2jobs( dim3 grid_dim, XFLOAT * oo_otrans_x, // otrans-size -> make const XFLOAT * oo_otrans_y, // otrans-size -> make const XFLOAT * oo_otrans_z, // otrans-size -> make const XFLOAT * myp_oo_otrans_x2y2z2, // otrans-size -> make const XFLOAT * weights, XFLOAT significant_weight, XFLOAT sum_weight, unsigned long nr_trans, unsigned long nr_oversampled_trans, unsigned long nr_oversampled_rot, int oversamples, bool skip_rots, XFLOAT * p_weights, XFLOAT * p_thr_wsum_prior_offsetx_class, XFLOAT * p_thr_wsum_prior_offsety_class, XFLOAT * p_thr_wsum_prior_offsetz_class, XFLOAT * p_thr_wsum_sigma2_offset, size_t * rot_idx, size_t * trans_idx, size_t * jobOrigin, size_t * jobExtent, bool data_is_3D ) { if(data_is_3D) { size_t shared_buffer = sizeof(XFLOAT)*SUMW_BLOCK_SIZE*5; // x+y+z+myp+weights cuda_kernel_collect2jobs<true><<<grid_dim,SUMW_BLOCK_SIZE,shared_buffer>>>( oo_otrans_x, // otrans-size -> make const oo_otrans_y, // otrans-size -> make const oo_otrans_z, // otrans-size -> make const myp_oo_otrans_x2y2z2, // otrans-size -> make const weights, significant_weight, sum_weight, nr_trans, nr_oversampled_trans, nr_oversampled_rot, oversamples, skip_rots, p_weights, p_thr_wsum_prior_offsetx_class, p_thr_wsum_prior_offsety_class, p_thr_wsum_prior_offsetz_class, p_thr_wsum_sigma2_offset, rot_idx, trans_idx, jobOrigin, jobExtent); } else { size_t shared_buffer = sizeof(XFLOAT)*SUMW_BLOCK_SIZE*4; // x+y+myp+weights cuda_kernel_collect2jobs<false><<<grid_dim,SUMW_BLOCK_SIZE,shared_buffer>>>( oo_otrans_x, // otrans-size -> make const oo_otrans_y, // otrans-size -> make const oo_otrans_z, // otrans-size -> make const myp_oo_otrans_x2y2z2, // otrans-size -> make const weights, significant_weight, sum_weight, nr_trans, nr_oversampled_trans, nr_oversampled_rot, oversamples, skip_rots, p_weights, p_thr_wsum_prior_offsetx_class, p_thr_wsum_prior_offsety_class, p_thr_wsum_prior_offsetz_class, p_thr_wsum_sigma2_offset, rot_idx, trans_idx, jobOrigin, jobExtent); } } //void windowFourierTransform2( // XFLOAT *d_in_real, // XFLOAT 
*d_in_imag, // XFLOAT *d_out_real, // XFLOAT *d_out_imag, // unsigned iX, unsigned iY, unsigned iZ, //Input dimensions // unsigned oX, unsigned oY, unsigned oZ, //Output dimensions // cudaStream_t stream // ) //{ // if (iX > 1 && iY/2 + 1 != iX) // REPORT_ERROR("windowFourierTransform ERROR: the Fourier transform should be of an image with equal sizes in all dimensions!"); // // if (oY == iX) // REPORT_ERROR("windowFourierTransform ERROR: there is a one-to-one map between input and output!"); // // cudaMemInit<XFLOAT>( d_out_real, 0, (size_t) oX*oY*oZ, stream ); // cudaMemInit<XFLOAT>( d_out_imag, 0, (size_t) oX*oY*oZ, stream ); // // if (oY > iX) // { // long int max_r2 = (iX - 1) * (iX - 1); // // unsigned grid_dim = ceil((float)(iX*iY*iZ) / (float) WINDOW_FT_BLOCK_SIZE); // cuda_kernel_window_fourier_transform<true><<< grid_dim, WINDOW_FT_BLOCK_SIZE, 0, stream >>>( // d_in_real, // d_in_imag, // d_out_real, // d_out_imag, // iX, iY, iZ, iX * iY, //Input dimensions // oX, oY, oZ, oX * oY, //Output dimensions // iX*iY*iZ, // max_r2 ); // } // else // { // unsigned grid_dim = ceil((float)(oX*oY*oZ) / (float) WINDOW_FT_BLOCK_SIZE); // cuda_kernel_window_fourier_transform<false><<< grid_dim, WINDOW_FT_BLOCK_SIZE, 0, stream >>>( // d_in_real, // d_in_imag, // d_out_real, // d_out_imag, // iX, iY, iZ, iX * iY, //Input dimensions // oX, oY, oZ, oX * oY, //Output dimensions // oX*oY*oZ); // } //} void windowFourierTransform2( CudaGlobalPtr<CUDACOMPLEX > &d_in, CudaGlobalPtr<CUDACOMPLEX > &d_out, size_t iX, size_t iY, size_t iZ, //Input dimensions size_t oX, size_t oY, size_t oZ, //Output dimensions size_t Npsi, size_t pos, cudaStream_t stream) { if (iX > 1 && iY/2 + 1 != iX) REPORT_ERROR("windowFourierTransform ERROR: the Fourier transform should be of an image with equal sizes in all dimensions!"); // if (oX == iX) // REPORT_ERROR("windowFourierTransform ERROR: there is a one-to-one map between input and output!"); deviceInitComplexValue(d_out, (XFLOAT)0.); HANDLE_ERROR(cudaStreamSynchronize(d_out.getStream())); if(oX==iX) { HANDLE_ERROR(cudaStreamSynchronize(d_in.getStream())); cudaCpyDeviceToDevice(&d_in.d_ptr[pos], ~d_out, oX*oY*oZ*Npsi, d_out.getStream() ); return; } if (oX > iX) { long int max_r2 = (iX - 1) * (iX - 1); dim3 grid_dim(ceil((float)(iX*iY*iZ) / (float) WINDOW_FT_BLOCK_SIZE),Npsi); cuda_kernel_window_fourier_transform<true><<< grid_dim, WINDOW_FT_BLOCK_SIZE, 0, d_out.getStream() >>>( &d_in.d_ptr[pos], d_out.d_ptr, iX, iY, iZ, iX * iY, //Input dimensions oX, oY, oZ, oX * oY, //Output dimensions iX*iY*iZ, max_r2 ); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } else { dim3 grid_dim(ceil((float)(oX*oY*oZ) / (float) WINDOW_FT_BLOCK_SIZE),Npsi); cuda_kernel_window_fourier_transform<false><<< grid_dim, WINDOW_FT_BLOCK_SIZE, 0, d_out.getStream() >>>( &d_in.d_ptr[pos], d_out.d_ptr, iX, iY, iZ, iX * iY, //Input dimensions oX, oY, oZ, oX * oY, //Output dimensions oX*oY*oZ); LAUNCH_HANDLE_ERROR(cudaGetLastError()); } } void selfApplyBeamTilt2(MultidimArray<Complex > &Fimg, RFLOAT beamtilt_x, RFLOAT beamtilt_y, RFLOAT wavelength, RFLOAT Cs, RFLOAT angpix, int ori_size) { if (Fimg.getDim() != 2) REPORT_ERROR("applyBeamTilt can only be done on 2D Fourier Transforms!"); RFLOAT boxsize = angpix * ori_size; RFLOAT factor = 0.360 * Cs * 10000000 * wavelength * wavelength / (boxsize * boxsize * boxsize); for (unsigned n = 0 ; n < Fimg.yxdim; n ++) { unsigned i = n / Fimg.xdim; unsigned j = n % Fimg.xdim; unsigned jp = j; int ip = i < Fimg.xdim ? 
i : i - Fimg.ydim; RFLOAT delta_phase = factor * (ip * ip + jp * jp) * (ip * beamtilt_y + jp * beamtilt_x); RFLOAT realval = Fimg.data[i*Fimg.xdim+j].real; RFLOAT imagval = Fimg.data[i*Fimg.xdim+j].imag; RFLOAT mag = sqrt(realval * realval + imagval * imagval); RFLOAT phas = atan2(imagval, realval) + DEG2RAD(delta_phase); // apply phase shift! realval = mag * cos(phas); imagval = mag * sin(phas); Fimg.data[i*Fimg.xdim+j] = Complex(realval, imagval); } }
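// Hedged illustration, not taken from the original sources: makeJobsForDiff2Fine above partitions
// the flat list of significant (rotation, translation) pairs into jobs (jobOrigin/jobExtent) of at
// most `chunk` translations that all share one rotation index. The host-only toy below reproduces
// that chunking idea in simplified form; the variable names are hypothetical.
#include <cstdio>
#include <vector>
int main()
{
    const int chunk = 2;
    // rotation index of 7 significant weights, already grouped by rotation as in the real code
    std::vector<int> rot_idx = {0, 0, 0, 1, 1, 2, 2};
    std::vector<int> jobOrigin, jobExtent;
    for (size_t w = 0; w < rot_idx.size(); ++w)
    {
        bool newJob = jobExtent.empty()
                   || jobExtent.back() == chunk                 // current job is full
                   || rot_idx[w] != rot_idx[jobOrigin.back()];  // rotation index changed
        if (newJob) { jobOrigin.push_back((int)w); jobExtent.push_back(0); }
        jobExtent.back()++;
    }
    for (size_t j = 0; j < jobOrigin.size(); ++j)
        std::printf("job %zu: origin=%d extent=%d\n", j, jobOrigin[j], jobExtent[j]);
    return 0;   // expected jobs: (0,2) (2,1) (3,2) (5,2)
}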
0c997d099e7c522e310bba88403837d016ab3270.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/dsymv_mgpu.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ahmad Abdelfattah * @date 2018-11-14 **/ #include "syhemv_mgpu_core.cuh" #include "syhemv_mgpu_offset_core.cuh" #include "kblas_defs.h" #if(TARGET_SM >= 30) #define dsymv_upper_bs (32) #define dsymv_upper_ty (4) #define dsymv_lower_bs (32) #define dsymv_lower_ty (2) #else #define dsymv_upper_bs (64) #define dsymv_upper_ty (8) #define dsymv_lower_bs (64) #define dsymv_lower_ty (8) #endif int kblas_dsymv_mgpu_driver( char uplo, int m, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int ngpus, int gpu_gid, hipStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ /** * If you change the configuration parameters, * you must revise the case statement of the upper case * to make sure it covers all the possible cases **/ const int dsymv_bs = dsymv_upper_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_upper_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_upper_by = 2*ngpus; /** end configuration params **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_upper_by); //if (mod == 0) mod = dsymv_bs; if(mod == 0) { hipLaunchKernelGGL(( syhemvu_mgpu_special_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); hipLaunchKernelGGL(( syhemvu_mgpu_special_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); } else { hipLaunchKernelGGL(( syhemvu_mgpu_generic_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); // for the non-diagonal part choose between a templatized irregular part or a variable one const int irregular_part = mod % elements_per_thread; if(0) {} else { // Templatized irregular_part /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ switch(irregular_part) { case 0:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 1:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 2:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 3:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 4:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 5:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 6:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 7:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 8:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; // return error otherwise: default: printf("DSYMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int dsymv_bs = dsymv_lower_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_lower_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_lower_by = 2*ngpus; // design rule, feel free to change it /** end configuration params **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_lower_by); if(mod == 0) { hipLaunchKernelGGL(( syhemvl_mgpu_special_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); hipLaunchKernelGGL(( syhemvl_mgpu_special_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); } else { hipLaunchKernelGGL(( syhemvl_mgpu_generic_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); hipLaunchKernelGGL(( syhemvl_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ int kblas_dsymv_mgpu_driver_offset( char uplo, int m, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int ngpus, int gpu_gid, int offset, hipStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ const int dsymv_bs = dsymv_upper_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_upper_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_upper_by = 2*ngpus; // design rule, feel free to change it /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % dsymv_bs; int total_blocks_skipped = offset / dsymv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * dsymv_bs * lda; dA += total_blocks_skipped * dsymv_bs; dX += total_blocks_skipped * dsymv_bs * incx; dY += total_blocks_skipped * dsymv_bs * incy; m -= total_blocks_skipped * dsymv_bs; /** end offset necessary calculation **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_upper_by); if(mod == 0) { hipLaunchKernelGGL(( syhemvu_mgpu_special_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); hipLaunchKernelGGL(( syhemvu_mgpu_special_nd_offset<double, dsymv_bs, thread_x, thread_y, 
elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); } else { hipLaunchKernelGGL(( syhemvu_mgpu_generic_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); const int irregular_part = mod % elements_per_thread; /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ switch(irregular_part) { case 0:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 1:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 2:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 3:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 4:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 5:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 6:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 7:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 8:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; // return error otherwise: default: printf("DSYMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int dsymv_bs = dsymv_lower_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_lower_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_lower_by = 2*ngpus; // design rule, feel free to change it /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % dsymv_bs; int total_blocks_skipped = offset / dsymv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * dsymv_bs * lda; dA += total_blocks_skipped * dsymv_bs; dX += total_blocks_skipped * dsymv_bs * incx; dY += total_blocks_skipped * dsymv_bs * incy; m -= total_blocks_skipped * dsymv_bs; /** end offset necessary calculation **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_lower_by); if(mod == 0) { hipLaunchKernelGGL(( syhemvl_mgpu_special_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); hipLaunchKernelGGL(( syhemvl_mgpu_special_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); } else { hipLaunchKernelGGL(( syhemvl_mgpu_generic_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); hipLaunchKernelGGL(( syhemvl_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ extern "C" int kblas_dsymv_mgpu( char uplo, int m, double alpha, double **dA, int lda, double **dX, int incx, double beta, double **dY, int incy, int ngpus, int offset) { const int ngpus_local = ngpus; if(offset == 0) { for(int i = 0; i < ngpus_local; i++) { hipSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i]); } } else { for(int i = 0; i < ngpus_local; i++) { hipSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset); } } // wait for gpus to finish for(int i = 0; i < ngpus_local; i++) { hipSetDevice(gpu_lid[i]); hipDeviceSynchronize(); } return 0; } /*************************************************************************************/ extern "C" int kblas_dsymv_mgpu_async( char uplo, int m, double alpha, double **dA, int lda, double **dX, int incx, double beta, double **dY, int incy, int ngpus, int offset, hipStream_t stream[MAX_NGPUS][MAX_STREAMS]) { const int ngpus_local = ngpus; if(offset == 0) { for(int i = 0; i < ngpus_local; i++) { hipSetDevice(gpu_lid[i]); 
kblas_dsymv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], stream[i][0]); } } else { for(int i = 0; i < ngpus_local; i++) { hipSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset, stream[i][0]); } } return 0; } /*************************************************************************************/ extern "C" int get_dsymv_mgpu_bs(char uplo) { if(uplo == 'l' || uplo == 'L') return dsymv_lower_bs; else if (uplo == 'u' || uplo == 'U') return dsymv_upper_bs; else {printf("Error .. input %c is not supported for symv \n", uplo); return -1;} }
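// Hedged illustration, not part of the original KBLAS file: the drivers above split the matrix
// into nstripes diagonal blocks of size dsymv_bs and assign them round-robin to the GPUs via
//     blocks = nstripes/ngpus;  if (gpu_gid < nstripes % ngpus) blocks += 1;
// The host-only sketch below just evaluates that arithmetic for example sizes (the numbers are
// illustrative assumptions, not taken from the library).
#include <cstdio>
int main()
{
    const int m = 1000, bs = 32, ngpus = 3;
    const int mod = m % bs;
    const int nstripes = m / bs + (mod != 0);    // 32 stripes for m = 1000, bs = 32
    for (int gpu_gid = 0; gpu_gid < ngpus; ++gpu_gid)
    {
        int blocks = nstripes / ngpus;
        if (gpu_gid < nstripes % ngpus) blocks += 1;
        std::printf("gpu %d handles %d stripes\n", gpu_gid, blocks);   // prints 11, 11, 10
    }
    return 0;
}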
0c997d099e7c522e310bba88403837d016ab3270.cu
/** * @copyright (c) 2012- King Abdullah University of Science and * Technology (KAUST). All rights reserved. **/ /** * @file src/blas_l2/dsymv_mgpu.cu * KBLAS is a high performance CUDA library for subset of BLAS * and LAPACK routines optimized for NVIDIA GPUs. * KBLAS is provided by KAUST. * * @version 3.0.0 * @author Ahmad Abdelfattah * @date 2018-11-14 **/ #include "syhemv_mgpu_core.cuh" #include "syhemv_mgpu_offset_core.cuh" #include "kblas_defs.h" #if(TARGET_SM >= 30) #define dsymv_upper_bs (32) #define dsymv_upper_ty (4) #define dsymv_lower_bs (32) #define dsymv_lower_ty (2) #else #define dsymv_upper_bs (64) #define dsymv_upper_ty (8) #define dsymv_lower_bs (64) #define dsymv_lower_ty (8) #endif int kblas_dsymv_mgpu_driver( char uplo, int m, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int ngpus, int gpu_gid, cudaStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ /** * If you change the configuration parameters, * you must revise the case statement of the upper case * to make sure it covers all the possible cases **/ const int dsymv_bs = dsymv_upper_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_upper_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_upper_by = 2*ngpus; /** end configuration params **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_upper_by); //if (mod == 0) mod = dsymv_bs; if(mod == 0) { syhemvu_mgpu_special_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); syhemvu_mgpu_special_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); } else { syhemvu_mgpu_generic_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); // for the non-diagonal part choose between a templatized irregular part or a variable one const int irregular_part = mod % elements_per_thread; if(0) {} else { // Templatized irregular_part /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. 
**/ switch(irregular_part) { case 0: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 1: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 2: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 3: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 4: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 5: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 6: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 7: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; case 8: syhemvu_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break; // return error otherwise: default: printf("DSYMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int dsymv_bs = dsymv_lower_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_lower_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_lower_by = 2*ngpus; // design rule, feel free to change it /** end configuration params **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_lower_by); if(mod == 0) { syhemvl_mgpu_special_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); syhemvl_mgpu_special_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes); } else { syhemvl_mgpu_generic_d<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); syhemvl_mgpu_generic_nd<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ int kblas_dsymv_mgpu_driver_offset( char uplo, int m, double alpha, double *dA, int lda, double *dX, int incx, double beta, double *dY, int incy, int ngpus, int gpu_gid, int offset, cudaStream_t stream = 0) { // handle the case when incx and/or incy is -ve if(incx < 0) dX -= (m-1) * incx; if(incy < 0) dY -= (m-1) * incy; if(uplo == 'U' || uplo == 'u') { /** configuration params **/ const int dsymv_bs = dsymv_upper_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_upper_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_upper_by = 2*ngpus; // design rule, feel free to change it /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % dsymv_bs; int total_blocks_skipped = offset / dsymv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * dsymv_bs * lda; dA += total_blocks_skipped * dsymv_bs; dX += total_blocks_skipped * dsymv_bs * incx; dY += total_blocks_skipped * dsymv_bs * incy; m -= total_blocks_skipped * dsymv_bs; /** end offset necessary calculation **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_upper_by); if(mod == 0) { syhemvu_mgpu_special_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); syhemvu_mgpu_special_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); } else { 
syhemvu_mgpu_generic_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); const int irregular_part = mod % elements_per_thread; /** * The upper case kernel for irregular dimensions has an extra template parameter. * This parameter must be among the values listed in the switch-case statement below. * The possible values are in the range 0 - (elements_per_thread-1) * Make sure these values are updated whenever you change the configuration parameters. **/ switch(irregular_part) { case 0: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 1: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 2: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 3: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 4: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 5: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 6: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 7: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; case 8: syhemvu_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break; // return error otherwise: default: printf("DSYMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. 
\n"); return -1; } } } else if(uplo == 'L' || uplo == 'l') { /** configuration params **/ const int dsymv_bs = dsymv_lower_bs; const int thread_x = dsymv_bs; const int thread_y = dsymv_lower_ty; const int elements_per_thread = (dsymv_bs/(2*thread_y)) ; const int dsymv_lower_by = 2*ngpus; // design rule, feel free to change it /** end configuration params **/ /** offset necessary calculation **/ int offset_ = offset % dsymv_bs; int total_blocks_skipped = offset / dsymv_bs; int my_skipped_blocks = total_blocks_skipped/ngpus; if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1; int ref_gpu = total_blocks_skipped%ngpus; int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus; // Advance pointers accordingly dA += my_skipped_blocks * dsymv_bs * lda; dA += total_blocks_skipped * dsymv_bs; dX += total_blocks_skipped * dsymv_bs * incx; dY += total_blocks_skipped * dsymv_bs * incy; m -= total_blocks_skipped * dsymv_bs; /** end offset necessary calculation **/ int mod = m % dsymv_bs; int nstripes = m / dsymv_bs + (mod != 0); int blocks = nstripes/ngpus; if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1; dim3 dimBlock(thread_x, thread_y); dim3 dimGrid(blocks,1); dim3 dimGrid_(blocks, dsymv_lower_by); if(mod == 0) { syhemvl_mgpu_special_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); syhemvl_mgpu_special_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_); } else { syhemvl_mgpu_generic_d_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); syhemvl_mgpu_generic_nd_offset<double, dsymv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); } } else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;} return 0; } /*************************************************************************************/ extern "C" int kblas_dsymv_mgpu( char uplo, int m, double alpha, double **dA, int lda, double **dX, int incx, double beta, double **dY, int incy, int ngpus, int offset) { const int ngpus_local = ngpus; if(offset == 0) { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i]); } } else { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset); } } // wait for gpus to finish for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); cudaDeviceSynchronize(); } return 0; } /*************************************************************************************/ extern "C" int kblas_dsymv_mgpu_async( char uplo, int m, double alpha, double **dA, int lda, double **dX, int incx, double beta, double **dY, int incy, int ngpus, int offset, cudaStream_t stream[MAX_NGPUS][MAX_STREAMS]) { const int ngpus_local = ngpus; if(offset == 0) { for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], stream[i][0]); } } else { 
for(int i = 0; i < ngpus_local; i++) { cudaSetDevice(gpu_lid[i]); kblas_dsymv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset, stream[i][0]); } } return 0; } /*************************************************************************************/ extern "C" int get_dsymv_mgpu_bs(char uplo) { if(uplo == 'l' || uplo == 'L') return dsymv_lower_bs; else if (uplo == 'u' || uplo == 'U') return dsymv_upper_bs; else {printf("Error .. input %c is not supported for symv \n", uplo); return -1;} }
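/* Editor's sketch (not part of the original KBLAS source): the drivers above all share one
 * distribution rule -- the matrix is cut into nstripes = ceil(m / bs) diagonal blocks and the
 * blocks are dealt to the GPUs round-robin by global id. The helper below only restates that
 * arithmetic so the rule is explicit; the function name is illustrative and `bs` is the block
 * size that get_dsymv_mgpu_bs() returns for the chosen uplo mode.
 */
static int dsymv_mgpu_blocks_for_gpu(int m, int bs, int ngpus, int gpu_gid)
{
    int mod = m % bs;
    int nstripes = m / bs + (mod != 0);             // total diagonal blocks
    int blocks = nstripes / ngpus;                  // even share per GPU
    if (gpu_gid < (nstripes % ngpus)) blocks += 1;  // leading GPUs absorb the remainder
    return blocks;
}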
fcd1c9c0446183f9abe2dacd96901f3ef1b6c494.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <ctime>
#include <chrono>
#include <hipblas.h>
#include <cstdio>
#include <stdlib.h>

#define imin(a, b) (a<b?a:b)

const int blockSize = 16;

// Naive matrix multiplication
// m x k matrix A, k x n matrix B, m x n matrix C = A x B
__global__ void gpu_matrix_multi(double *matA, double *matB, double *matC, int m, int k, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    double sum = 0.0;
    if (idx < n && idy < m) {
        for (int i = 0; i < k; i++) {
            sum += matA[idy*k + i] * matB[i*n + idx];
        }
        matC[idy*n + idx] = sum;
        idx += blockDim.x * gridDim.x;
        idy += blockDim.y * gridDim.y;
    }
}

// Matrix multiplication by cuBLAS
// m x k matrix A, k x n matrix B, m x n matrix C = A x B
// tran(C) = tran(B) x tran(A)
void gpu_blas_multi(double *matB, double *matA, double *matC, int m, int k, int n)
{
    int lda = n, ldb = k, ldc = n;
    double alf = 1.0;
    double bet = 0.0;
    double *alpha = &alf;
    double *beta = &bet;
    // Create a handle for cuBLAS
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    // Do the actual multiplication
    hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, alpha, matB, lda, matA, ldb, beta, matC, ldc);
    // Destroy the handle
    hipblasDestroy(handle);
}

// Naive cpu matrix multiplication
// m x k matrix A, k x n matrix B, m x n matrix C = A x B
void cpu_matrix_multi(double *matA, double *matB, double *matC, int m, int k, int n)
{
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            double tmp = 0.0;
            for (int l = 0; l < k; l++) {
                tmp += matA[i*k + l] * matB[l*n + j];
            }
            matC[i*n + j] = tmp;
        }
    }
}

// Matrix multiplication by BLAS
// m x k matrix A, k x n matrix B, m x n matrix C = A x B
// tran(C) = tran(B) x tran(A)
extern "C"{
    // product C= alphaA.B + betaC
    void dgemm_(char* TRANSA, char* TRANSB, const int* M, const int* N, const int* K,
                double* alpha, double* A, const int* LDA, double* B, const int* LDB,
                double* beta, double* C, const int* LDC);
}

void initvec(double* v, int N){
    for(int i= 0; i<N; ++i){
        v[i]= 0.0;
    }
}

void cpu_blas_multi(double *matB, double *matA, double *matC, int m, int k, int n)
{
    double alpha= 1.0, beta= 0.0;
    char no= 'N', tr= 'T';
    int m1 = m, k1 = k, n1 = n, lda= n, incx= k, incy= n;
    double* tmp= new double[m*n];
    initvec(tmp, m*n);
    dgemm_(&no, &no, &n1, &m1, &k1, &alpha, matB, &lda, matA, &incx, &beta, tmp, &incy);
    for(int i= 0; i<m*n; ++i){
        matC[i]= tmp[i];
    }
    delete [] tmp;
}

int main(void)
{
    int m = 1<<15, k = 1<<10, n = 1<<15;
    // int m = 100, k = 1<< 20, n = 100;
    int grid_cols = imin(1024, (n + blockSize - 1)/blockSize);
    int grid_rows = imin(1024, (m + blockSize - 1)/blockSize);
    dim3 dimBlock(blockSize, blockSize, 1);
    dim3 dimGrid(grid_cols, grid_rows, 1);

    // Allocate memory in host RAM
    double *h_matA = new double[m*k];
    double *h_matB = new double[k*n];
    double *h_matC = new double[m*n];
    double *h_matC_cpu = new double[m*n];

    // Initialize h_mat_in
    std::srand(1103);
    for (int i = 0; i < m; i++)
        for (int j = 0; j < k; j++)
            h_matA[i*k+j] = double(std::rand())/double(RAND_MAX);
    for (int i = 0; i < k; i++)
        for (int j = 0; j < n; j++)
            h_matB[i*n+j] = double(std::rand())/double(RAND_MAX);

    // capture the GPU start time
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    // Allocate memory on device
    double *d_matA, *d_matB, *d_matC;
    hipMalloc(&d_matA, m*k*sizeof(double));
    hipMalloc(&d_matB, k*n*sizeof(double));
    hipMalloc(&d_matC, m*n*sizeof(double));

    // Copy matrix in from host to device
    // auto wallGPU0 = std::chrono::system_clock::now();
    hipMemcpy(d_matA, h_matA, m*k*sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_matB, h_matB, k*n*sizeof(double), hipMemcpyHostToDevice);

    // Run kernel
    // gpu_matrix_multi<<<dimGrid, dimBlock>>>(d_matA, d_matB, d_matC, m, k, n);
    gpu_blas_multi(d_matB, d_matA, d_matC, m, k, n);

    // Copy result from device to host
    hipMemcpy(h_matC, d_matC, m*n*sizeof(double), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // std::chrono::duration<double> wallGPUduration = (std::chrono::system_clock::now() - wallGPU0);

    // get GPU stop time
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    // cout << "Time to generate: " << elapsedTime << " ms." << endl;
    hipEventDestroy(start);
    hipEventDestroy(stop);

    // Check results
    auto wallCPU0 = std::chrono::system_clock::now();
    // cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, m, k, n);
    cpu_blas_multi(h_matB, h_matA, h_matC_cpu, m, k, n);
    std::chrono::duration<double> wallCPUduration = (std::chrono::system_clock::now() - wallCPU0);

    int check_flag = 1;
    double resol = 1e-5;
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
            if (fabs(h_matC[i*n + j] - h_matC_cpu[i*n + j]) > resol) check_flag = 0;
    if (!check_flag) std::cout << "GPU matrix multiplication not success!!!" << std::endl;
    else {
        std::cout << "GPU matrix multiplication success!!!" << std::endl;
        std::cout << "GPU matrix multiplication by cublas costs: " << elapsedTime << " ms." << std::endl;
        std::cout << "CPU matrix multiplication by blas costs: " << wallCPUduration.count() << " s." << std::endl;
    }

    // Free memory
    hipFree(d_matA);
    hipFree(d_matB);
    hipFree(d_matC);
    delete [] h_matA;
    delete [] h_matB;
    delete [] h_matC;
    delete [] h_matC_cpu;
    return 0;
}
fcd1c9c0446183f9abe2dacd96901f3ef1b6c494.cu
#include <iostream> #include <math.h> #include <cstdlib> #include <ctime> #include <chrono> #include <cublas_v2.h> #include <cstdio> #include <stdlib.h> #define imin(a, b) (a<b?a:b) const int blockSize = 16; // Naive matrix multiplication // m x k matrix A, k x n matrix B, m x n matrix C = A x B __global__ void gpu_matrix_multi(double *matA, double *matB, double *matC, int m, int k, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0.0; if (idx < n && idy < m) { for (int i = 0; i < k; i++) { sum += matA[idy*k + i] * matB[i*n + idx]; } matC[idy*n + idx] = sum; idx += blockDim.x * gridDim.x; idy += blockDim.y * gridDim.y; } } // Matrix multiplication by cuBLAS // m x k matrix A, k x n matrix B, m x n matrix C = A x B // tran(C) = tran(B) x tran(A) void gpu_blas_multi(double *matB, double *matA, double *matC, int m, int k, int n) { int lda = n, ldb = k, ldc = n; double alf = 1.0; double bet = 0.0; double *alpha = &alf; double *beta = &bet; // Create a handle for cuBLAS cublasHandle_t handle; cublasCreate(&handle); // Do the actual multiplication cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, matB, lda, matA, ldb, beta, matC, ldc); // Destroy the handle cublasDestroy(handle); } // Naive cpu matrix multiplication // m x k matrix A, k x n matrix B, m x n matrix C = A x B void cpu_matrix_multi(double *matA, double *matB, double *matC, int m, int k, int n) { for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { double tmp = 0.0; for (int l = 0; l < k; l++) { tmp += matA[i*k + l] * matB[l*n + j]; } matC[i*n + j] = tmp; } } } // Matrix multiplication by BLAS // m x k matrix A, k x n matrix B, m x n matrix C = A x B // tran(C) = tran(B) x tran(A) extern "C"{ // product C= alphaA.B + betaC void dgemm_(char* TRANSA, char* TRANSB, const int* M, const int* N, const int* K, double* alpha, double* A, const int* LDA, double* B, const int* LDB, double* beta, double* C, const int* LDC); } void initvec(double* v, int N){ for(int i= 0; i<N; ++i){ v[i]= 0.0; } } void cpu_blas_multi(double *matB, double *matA, double *matC, int m, int k, int n) { double alpha= 1.0, beta= 0.0; char no= 'N', tr= 'T'; int m1 = m, k1 = k, n1 = n, lda= n, incx= k, incy= n; double* tmp= new double[m*n]; initvec(tmp, m*n); dgemm_(&no, &no, &n1, &m1, &k1, &alpha, matB, &lda, matA, &incx, &beta, tmp, &incy); for(int i= 0; i<m*n; ++i){ matC[i]= tmp[i]; } delete [] tmp; } int main(void) { int m = 1<<15, k = 1<<10, n = 1<<15; // int m = 100, k = 1<< 20, n = 100; int grid_cols = imin(1024, (n + blockSize - 1)/blockSize); int grid_rows = imin(1024, (m + blockSize - 1)/blockSize); dim3 dimBlock(blockSize, blockSize, 1); dim3 dimGrid(grid_cols, grid_rows, 1); // Allocate memory in host RAM double *h_matA = new double[m*k]; double *h_matB = new double[k*n]; double *h_matC = new double[m*n]; double *h_matC_cpu = new double[m*n]; // Initialize h_mat_in std::srand(1103); for (int i = 0; i < m; i++) for (int j = 0; j < k; j++) h_matA[i*k+j] = double(std::rand())/double(RAND_MAX); for (int i = 0; i < k; i++) for (int j = 0; j < n; j++) h_matB[i*n+j] = double(std::rand())/double(RAND_MAX); // capture the GPU start time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Allocate memory on device double *d_matA, *d_matB, *d_matC; cudaMalloc(&d_matA, m*k*sizeof(double)); cudaMalloc(&d_matB, k*n*sizeof(double)); cudaMalloc(&d_matC, m*n*sizeof(double)); // Copy matrix in from host to device // auto wallGPU0 = 
std::chrono::system_clock::now(); cudaMemcpy(d_matA, h_matA, m*k*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_matB, h_matB, k*n*sizeof(double), cudaMemcpyHostToDevice); // Run kernel // gpu_matrix_multi<<<dimGrid, dimBlock>>>(d_matA, d_matB, d_matC, m, k, n); gpu_blas_multi(d_matB, d_matA, d_matC, m, k, n); // Copy result from device to host cudaMemcpy(h_matC, d_matC, m*n*sizeof(double), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // std::chrono::duration<double> wallGPUduration = (std::chrono::system_clock::now() - wallGPU0); // get GPU stop time cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); // cout << "Time to generate: " << elapsedTime << " ms." << endl; cudaEventDestroy(start); cudaEventDestroy(stop); // Check results auto wallCPU0 = std::chrono::system_clock::now(); // cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, m, k, n); cpu_blas_multi(h_matB, h_matA, h_matC_cpu, m, k, n); std::chrono::duration<double> wallCPUduration = (std::chrono::system_clock::now() - wallCPU0); int check_flag = 1; double resol = 1e-5; for (int i = 0; i < m; i++) for (int j = 0; j < n; j++) if (fabs(h_matC[i*n + j] - h_matC_cpu[i*n + j]) > resol) check_flag = 0; if (!check_flag) std::cout << "GPU matrix multiplication not success!!!" << std::endl; else { std::cout << "GPU matrix multiplication success!!!" << std::endl; std::cout << "GPU matrix multiplication by cublas costs: " << elapsedTime << " ms." << std::endl; std::cout << "CPU matrix multiplication by blas costs: " << wallCPUduration.count() << " s." << std::endl; } // Free memory cudaFree(d_matA); cudaFree(d_matB); cudaFree(d_matC); delete [] h_matA; delete [] h_matB; delete [] h_matC; delete [] h_matC_cpu; return 0; }
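/* Editor's sketch (not in the original benchmark): gpu_blas_multi above gets a row-major
 * C = A*B out of column-major cuBLAS by computing tran(C) = tran(B)*tran(A). A row-major
 * m x k array with leading dimension k is bit-for-bit a column-major k x m matrix, so no
 * transpose flags or extra copies are needed. The wrapper below only restates that argument
 * mapping; reusing a caller-provided handle is an assumption, not what the benchmark does
 * (it creates and destroys one handle per call).
 */
void gemm_row_major(cublasHandle_t handle,
                    const double* A, const double* B, double* C,
                    int m, int k, int n)
{
    const double one = 1.0, zero = 0.0;
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,            // dimensions of tran(C) = tran(B) * tran(A)
                &one,
                B, n,               // tran(B): n x k, leading dimension n
                A, k,               // tran(A): k x m, leading dimension k
                &zero,
                C, n);              // tran(C): n x m, leading dimension n
}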
32a641416182d6a485c4b18200a3e6dc03bde013.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ unsigned int getGid3d3d(){
    int blockId = blockIdx.x + blockIdx.y * gridDim.x
                + gridDim.x * gridDim.y * blockIdx.z;
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
                 + (threadIdx.y * blockDim.x)
                 + (threadIdx.z * (blockDim.x * blockDim.y))
                 + threadIdx.x;
    return threadId;
}

__global__ void is_eq(bool *a, bool *b, bool *ans){
    int gid = getGid3d3d();
    ans[0] = true;
    if (a[gid] != b[gid]){
        ans[0] = false;
    }
}
32a641416182d6a485c4b18200a3e6dc03bde013.cu
#include "includes.h"

__device__ unsigned int getGid3d3d(){
    int blockId = blockIdx.x + blockIdx.y * gridDim.x
                + gridDim.x * gridDim.y * blockIdx.z;
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
                 + (threadIdx.y * blockDim.x)
                 + (threadIdx.z * (blockDim.x * blockDim.y))
                 + threadIdx.x;
    return threadId;
}

__global__ void is_eq(bool *a, bool *b, bool *ans){
    int gid = getGid3d3d();
    ans[0] = true;
    if (a[gid] != b[gid]){
        ans[0] = false;
    }
}
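/* Editor's note (hedged): as written, is_eq races on ans[0] -- every thread first stores
 * true, so a later-scheduled thread can overwrite a false written by a thread that found a
 * mismatch, and the kernel also assumes the grid exactly covers the arrays. A common fix is
 * to set the flag once on the host and let the kernel only ever write false (all racing
 * writers then store the same value). The variant below is a sketch with an illustrative
 * name and an added element count, not a drop-in from the original project.
 */
__global__ void is_eq_checked(const bool *a, const bool *b, bool *ans, unsigned int n){
    unsigned int gid = getGid3d3d();
    if (gid < n && a[gid] != b[gid]){
        ans[0] = false;   // host initializes ans[0] = true before the launch
    }
}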
6720a8e5a3410a87a35e5e41b09470d79873d0cc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <algorithm> #define EXTERN extern #include "timestep.cuh" #include "collectiveops.cuh" #include "../diagnostics_c.h" #include "ddiagsextern.cuh" #include "dfdf.cuh" #include "../density_c.h" #include "../hydro_c.h" #include "../cparam_c.h" #include "defines_dims_PC.h" #include "../cdata_c.h" #include "defines_PC.h" #include "../viscosity_c.h" #include "../eos_c.h" //using namespace PC; //---------------------------------------------------------- // Calculate Courant timestep for the system. //---------------------------------------------------------- // Takes the domain's device pointers as parameters and return a new timestep // d_umax = a single float containing the max velocity of the computational domain (allocated in compute.cu) // d_partial_result = temporary array containing the partial results of the reduction (allocated in compute.cu) float timestep_cuda(float* d_umax, float* d_partial_result, float* d_uu_x, float* d_uu_y, float* d_uu_z) { //MV: It is better to calculate dt within the CPU after we get umax from max_vec_cuda, //MV: because we need the information in the CPU lever anyway // Determine the correct time step for the system static float dt, umax, uu_dt, visc_dt; //Initialize only once (static var lifetime == entire program) //Get uu max to d_umax max_vec_cuda(d_umax, d_partial_result, d_uu_x, d_uu_y, d_uu_z); hipDeviceSynchronize(); hipMemcpy(&umax, (float*)d_umax, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("UMAX: %F\n", umax); //Get dsmin (cannot be defined in defines.h) static float dsmin = DX; if (dsmin > DY) dsmin = DY; if (dsmin > DZ) dsmin = DZ; //Courant timesteps //MV: DSMIN not yet defined in the constants. uu_dt = CDT*(dsmin/(umax + CS_SOUND)); //MV: This stays actually conctant now, but if we add phenomena //MV: like hyperviscosty and shock viscosity this could change visc_dt = CDTV*dsmin*dsmin / NU_VISC; //TODO inverse NU_VISC in defines.h if (uu_dt < visc_dt) { dt = uu_dt; } else { dt = visc_dt; } return dt; } void get_maxscal_from_device(float & maxscal,float *d_src) { max_scal_cuda(d_scaldiag, d_partial_result, d_src); hipDeviceSynchronize(); hipMemcpy(&maxscal, d_scaldiag, sizeof(float), hipMemcpyDeviceToHost); } void get_minscal_from_device(float &minscal,float *d_src) { min_scal_cuda(d_scaldiag, d_partial_result, d_src); hipDeviceSynchronize(); hipMemcpy(&minscal, d_scaldiag, sizeof(float), hipMemcpyDeviceToHost); //hipDeviceSynchronize(); } /*void timeseries_diagnostics_cuda(float* d_umax, float* d_umin, float* d_urms, float* d_uxrms, float* d_uyrms, float* d_uzrms, float* d_rhorms, */ void timeseries_diagnostics_cuda(int step, float dt, double t) { //Calculate, print and save all of the diagnostic variables calculated within the CUDA devices. 
static float urms, umax, umin, diag; static float rhorms; static float uxrms, uyrms, uzrms; if (idiag_uxmax>0) { get_maxscal_from_device(diag,d_uu_x); save_name(diag,idiag_uxmax); } if (idiag_uymax>0) { get_maxscal_from_device(diag,d_uu_y); save_name(diag,idiag_uymax); } if (idiag_uzmax>0) { get_maxscal_from_device(diag,d_uu_z); save_name(diag,idiag_uzmax); } if (idiag_uxmin>0) { get_minscal_from_device(diag,d_uu_x); save_name(diag,idiag_uxmin); } if (idiag_uymin>0) { get_minscal_from_device(diag,d_uu_y); save_name(diag,idiag_uymin); } if (idiag_uzmin>0) { get_minscal_from_device(diag,d_uu_z); save_name(diag,idiag_uzmin); } if (idiag_umax>0) { max_vec_cuda(d_umax, d_partial_result, d_uu_x, d_uu_y, d_uu_z); hipDeviceSynchronize(); hipMemcpy(&umax, (float*)d_umax, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("umax= %f\n", umax); save_name(umax,idiag_umax); } if (idiag_rhomax>0) { get_maxscal_from_device(diag,d_lnrho); if (!ldensity_nolog) diag = exp(diag); //Change away from the logarithmic form save_name(diag,idiag_rhomax); } if (idiag_rhomin>0) { get_minscal_from_device(diag,d_lnrho); if (!ldensity_nolog) diag = exp(diag); //Change away from the logarithmic form save_name(diag,idiag_rhomin); } if (idiag_umin>0) { min_vec_cuda(d_umin, d_partial_result, d_uu_x, d_uu_y, d_uu_z); hipDeviceSynchronize(); hipMemcpy(&umin, (float*)d_umin, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } if (idiag_urms){ vec_rms_cuda(d_urms, d_partial_result, d_uu_x, d_uu_y, d_uu_z); hipDeviceSynchronize(); hipMemcpy(&urms, (float*)d_urms, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } if (idiag_uxrms){ scal_rms_cuda(d_uxrms, d_partial_result, d_uu_x); hipDeviceSynchronize(); hipMemcpy(&uxrms, (float*)d_uxrms, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } if (idiag_uyrms){ scal_rms_cuda(d_uyrms, d_partial_result, d_uu_y); hipDeviceSynchronize(); hipMemcpy(&uyrms, (float*)d_uyrms, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } if (idiag_uzrms){ scal_rms_cuda(d_uzrms, d_partial_result, d_uu_z); hipDeviceSynchronize(); hipMemcpy(&uzrms, (float*)d_uzrms, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } if (idiag_rhorms){ scal_exp_rms_cuda(d_rhorms, d_partial_result, d_lnrho); hipDeviceSynchronize(); hipMemcpy(&rhorms, (float*)d_rhorms, sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } //if (iproc==0) //printf(" step = %i; t = %e; dt = %e; umax = %e; umin = %e; urms = %e",step, t, dt, umax, umin, urms); //printf(" step = %i; t = %e; dt = %e; umax = %e; umin = %e; urms = %e; \n uxrms = %e; uyrms = %e; uzrms = %e; \n uxmax = %e; uymax = %e; uzmax = %e; \n uxmin = %e; uymin = %e; uzmin = %e; \n rhomax = %e; rhomin = %e; rhorms = %e \n", // step, t, dt, umax, umin, urms, uxrms, uyrms, uzrms, uxmax, uymax, uzmax, uxmin, uymin, uzmin, rhomax, rhomin, rhorms); //Save the step into a file //save_ts(t, dt, step, urms, uxrms, uyrms, uzrms, uxmax, uymax, uzmax, rhorms, umax, rhomax, uxmin, uymin, uzmin, rhomin, umin); }
6720a8e5a3410a87a35e5e41b09470d79873d0cc.cu
#include <stdio.h> #include <math.h> #include <algorithm> #define EXTERN extern #include "timestep.cuh" #include "collectiveops.cuh" #include "../diagnostics_c.h" #include "ddiagsextern.cuh" #include "dfdf.cuh" #include "../density_c.h" #include "../hydro_c.h" #include "../cparam_c.h" #include "defines_dims_PC.h" #include "../cdata_c.h" #include "defines_PC.h" #include "../viscosity_c.h" #include "../eos_c.h" //using namespace PC; //---------------------------------------------------------- // Calculate Courant timestep for the system. //---------------------------------------------------------- // Takes the domain's device pointers as parameters and return a new timestep // d_umax = a single float containing the max velocity of the computational domain (allocated in compute.cu) // d_partial_result = temporary array containing the partial results of the reduction (allocated in compute.cu) float timestep_cuda(float* d_umax, float* d_partial_result, float* d_uu_x, float* d_uu_y, float* d_uu_z) { //MV: It is better to calculate dt within the CPU after we get umax from max_vec_cuda, //MV: because we need the information in the CPU lever anyway // Determine the correct time step for the system static float dt, umax, uu_dt, visc_dt; //Initialize only once (static var lifetime == entire program) //Get uu max to d_umax max_vec_cuda(d_umax, d_partial_result, d_uu_x, d_uu_y, d_uu_z); cudaDeviceSynchronize(); cudaMemcpy(&umax, (float*)d_umax, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("UMAX: %F\n", umax); //Get dsmin (cannot be defined in defines.h) static float dsmin = DX; if (dsmin > DY) dsmin = DY; if (dsmin > DZ) dsmin = DZ; //Courant timesteps //MV: DSMIN not yet defined in the constants. uu_dt = CDT*(dsmin/(umax + CS_SOUND)); //MV: This stays actually conctant now, but if we add phenomena //MV: like hyperviscosty and shock viscosity this could change visc_dt = CDTV*dsmin*dsmin / NU_VISC; //TODO inverse NU_VISC in defines.h if (uu_dt < visc_dt) { dt = uu_dt; } else { dt = visc_dt; } return dt; } void get_maxscal_from_device(float & maxscal,float *d_src) { max_scal_cuda(d_scaldiag, d_partial_result, d_src); cudaDeviceSynchronize(); cudaMemcpy(&maxscal, d_scaldiag, sizeof(float), cudaMemcpyDeviceToHost); } void get_minscal_from_device(float &minscal,float *d_src) { min_scal_cuda(d_scaldiag, d_partial_result, d_src); cudaDeviceSynchronize(); cudaMemcpy(&minscal, d_scaldiag, sizeof(float), cudaMemcpyDeviceToHost); //cudaDeviceSynchronize(); } /*void timeseries_diagnostics_cuda(float* d_umax, float* d_umin, float* d_urms, float* d_uxrms, float* d_uyrms, float* d_uzrms, float* d_rhorms, */ void timeseries_diagnostics_cuda(int step, float dt, double t) { //Calculate, print and save all of the diagnostic variables calculated within the CUDA devices. 
static float urms, umax, umin, diag; static float rhorms; static float uxrms, uyrms, uzrms; if (idiag_uxmax>0) { get_maxscal_from_device(diag,d_uu_x); save_name(diag,idiag_uxmax); } if (idiag_uymax>0) { get_maxscal_from_device(diag,d_uu_y); save_name(diag,idiag_uymax); } if (idiag_uzmax>0) { get_maxscal_from_device(diag,d_uu_z); save_name(diag,idiag_uzmax); } if (idiag_uxmin>0) { get_minscal_from_device(diag,d_uu_x); save_name(diag,idiag_uxmin); } if (idiag_uymin>0) { get_minscal_from_device(diag,d_uu_y); save_name(diag,idiag_uymin); } if (idiag_uzmin>0) { get_minscal_from_device(diag,d_uu_z); save_name(diag,idiag_uzmin); } if (idiag_umax>0) { max_vec_cuda(d_umax, d_partial_result, d_uu_x, d_uu_y, d_uu_z); cudaDeviceSynchronize(); cudaMemcpy(&umax, (float*)d_umax, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("umax= %f\n", umax); save_name(umax,idiag_umax); } if (idiag_rhomax>0) { get_maxscal_from_device(diag,d_lnrho); if (!ldensity_nolog) diag = exp(diag); //Change away from the logarithmic form save_name(diag,idiag_rhomax); } if (idiag_rhomin>0) { get_minscal_from_device(diag,d_lnrho); if (!ldensity_nolog) diag = exp(diag); //Change away from the logarithmic form save_name(diag,idiag_rhomin); } if (idiag_umin>0) { min_vec_cuda(d_umin, d_partial_result, d_uu_x, d_uu_y, d_uu_z); cudaDeviceSynchronize(); cudaMemcpy(&umin, (float*)d_umin, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } if (idiag_urms){ vec_rms_cuda(d_urms, d_partial_result, d_uu_x, d_uu_y, d_uu_z); cudaDeviceSynchronize(); cudaMemcpy(&urms, (float*)d_urms, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } if (idiag_uxrms){ scal_rms_cuda(d_uxrms, d_partial_result, d_uu_x); cudaDeviceSynchronize(); cudaMemcpy(&uxrms, (float*)d_uxrms, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } if (idiag_uyrms){ scal_rms_cuda(d_uyrms, d_partial_result, d_uu_y); cudaDeviceSynchronize(); cudaMemcpy(&uyrms, (float*)d_uyrms, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } if (idiag_uzrms){ scal_rms_cuda(d_uzrms, d_partial_result, d_uu_z); cudaDeviceSynchronize(); cudaMemcpy(&uzrms, (float*)d_uzrms, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } if (idiag_rhorms){ scal_exp_rms_cuda(d_rhorms, d_partial_result, d_lnrho); cudaDeviceSynchronize(); cudaMemcpy(&rhorms, (float*)d_rhorms, sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } //if (iproc==0) //printf(" step = %i; t = %e; dt = %e; umax = %e; umin = %e; urms = %e",step, t, dt, umax, umin, urms); //printf(" step = %i; t = %e; dt = %e; umax = %e; umin = %e; urms = %e; \n uxrms = %e; uyrms = %e; uzrms = %e; \n uxmax = %e; uymax = %e; uzmax = %e; \n uxmin = %e; uymin = %e; uzmin = %e; \n rhomax = %e; rhomin = %e; rhorms = %e \n", // step, t, dt, umax, umin, urms, uxrms, uyrms, uzrms, uxmax, uymax, uzmax, uxmin, uymin, uzmin, rhomax, rhomin, rhorms); //Save the step into a file //save_ts(t, dt, step, urms, uxrms, uyrms, uzrms, uxmax, uymax, uzmax, rhorms, umax, rhomax, uxmin, uymin, uzmin, rhomin, umin); }
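/* Editor's restatement (hedged): timestep_cuda above is the usual Courant pick,
 *   dt_adv  = CDT  * ds_min / (u_max + c_s)
 *   dt_visc = CDTV * ds_min * ds_min / nu
 *   dt      = min(dt_adv, dt_visc)
 * with u_max coming from the max_vec_cuda reduction. The helper below only restates that
 * host-side arithmetic; the name and the explicit parameters are illustrative, the real
 * code reads CDT, CDTV, CS_SOUND and NU_VISC from its defines.
 */
static float courant_dt(float umax, float dsmin,
                        float cdt, float cdtv,
                        float cs_sound, float nu_visc)
{
    float uu_dt   = cdt  * dsmin / (umax + cs_sound);   // advective limit
    float visc_dt = cdtv * dsmin * dsmin / nu_visc;     // viscous limit
    return (uu_dt < visc_dt) ? uu_dt : visc_dt;
}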
f46373f1010ac3af7b335b7a54537d7165c91136.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include <ATen/native/transformers/hip/flash_attn/flash_fwd_launch_template.h>

namespace pytorch_flash{

template<>
void run_mha_fwd_<cutlass::half_t, 96>(Flash_fwd_params &params, hipStream_t stream) {
    run_mha_fwd_hdim96<cutlass::half_t>(params, stream);
}

} // namespace pytorch_flash
f46373f1010ac3af7b335b7a54537d7165c91136.cu
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include <ATen/native/transformers/cuda/flash_attn/flash_fwd_launch_template.h>

namespace pytorch_flash{

template<>
void run_mha_fwd_<cutlass::half_t, 96>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim96<cutlass::half_t>(params, stream);
}

} // namespace pytorch_flash
2096e595527c7ce0b78acfe1d1e2475177651afb.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Created by Pawan Harish. origin: Rodinia (http://rodinia.cs.virginia.edu/doku.php) ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string> #include <cassert> #include <math.h> #include <hip/hip_runtime.h> #include "cudacommon.h" #include "ResultDatabase.h" #include "OptionParser.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Minimum nodes. (Unused) </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MIN_NODES 20 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Maximum nodes. (Unused) </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MAX_NODES ULONG_MAX //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Minimum edges in the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MIN_EDGES 2 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Maximum Initialize edges in the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MAX_INIT_EDGES 4 // Nodes will have, on average, 2*MAX_INIT_EDGES edges //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Minimum weight of an edge. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MIN_WEIGHT 1 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Maximum weight of an edge. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. 
</remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MAX_WEIGHT 10 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines seed for random number generator. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define SEED 7 using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A node structure in the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// struct Node { /// <summary> The starting position. </summary> int starting; /// <summary> The number of edges connected to this node. </summary> int no_of_edges; }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Initializes the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> [in,out] The no of nodes. </param> /// <param name="edge_list_size"> [in,out] Size of the edge list. </param> /// <param name="source"> [in,out] Source for the initialization. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges); //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS graph runner. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> A float. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges); #ifdef UNIFIED_MEMORY //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS graph using unified memory. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. 
</param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> A float. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges); #endif //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS Kernel. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="g_graph_nodes"> [in,out] If non-null, the graph nodes. </param> /// <param name="g_graph_edges"> [in,out] If non-null, the graph edges. </param> /// <param name="g_graph_mask"> [in,out] If non-null, true to graph mask. </param> /// <param name="g_updating_graph_mask"> [in,out] If non-null, true to updating graph mask. </param> /// <param name="g_graph_visited"> [in,out] If non-null, true if graph visited. </param> /// <param name="g_cost"> [in,out] If non-null, the cost. </param> /// <param name="no_of_nodes"> The no of nodes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes) { int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x; if( tid<no_of_nodes && g_graph_mask[tid]) { g_graph_mask[tid]=false; for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++) { int id = g_graph_edges[i]; if(!g_graph_visited[id]) { g_cost[id]=g_cost[tid]+1; g_updating_graph_mask[id]=true; } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS Kernel 2. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="g_graph_mask"> [in,out] If non-null, true to graph mask. </param> /// <param name="g_updating_graph_mask"> [in,out] If non-null, true to updating graph mask. </param> /// <param name="g_graph_visited"> [in,out] If non-null, true if graph visited. </param> /// <param name="g_over"> [in,out] If non-null, true to over. </param> /// <param name="no_of_nodes"> The no of nodes. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Kernel2( bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes) { int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x; if( tid<no_of_nodes && g_updating_graph_mask[tid]) { g_graph_mask[tid]=true; g_graph_visited[tid]=true; *g_over=true; g_updating_graph_mask[tid]=false; } } //////////////////////////////////////////////////////////////////////////////// // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the radix sort benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing, results are stored in resultDB // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { printf("Running BFS\n"); int device; hipGetDevice(&device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); // seed random number generator srand(SEED); int no_of_nodes = 0; int edge_list_size = 0; int source = 0; Node* h_graph_nodes; int* h_graph_edges; initGraph(op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges); // atts string for result database char tmp[64]; sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size); string atts = string(tmp); bool quiet = op.getOptionBool("quiet"); int passes = op.getOptionInt("passes"); for(int i = 0; i < passes; i++) { if(!quiet) { printf("Pass %d:\n", i); } #ifdef UNIFIED_MEMORY float timeUM = BFSGraphUnifiedMemory(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges); if(!quiet) { if(timeUM == FLT_MAX) { printf("Executing BFS using unified memory...Error.\n"); } else { printf("Executing BFS using unified memory...Done.\n"); } } //if(time != FLT_MAX && timeUM != FLT_MAX) { if(timeUM != FLT_MAX) { //resultDB.AddResult("bfs_unifiedmem_speedup", atts, "N", time/timeUM); } #else float time = BFSGraph(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges); if(!quiet) { if(time == FLT_MAX) { printf("Executing BFS...Error.\n"); } else { printf("Executing BFS...Done.\n"); } } #endif } free( h_graph_nodes); free( h_graph_edges); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Generate Uniform distribution. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="rangeLow"> The range low. </param> /// <param name="rangeHigh"> The range high. </param> /// /// <returns> A scaled random int. 
</returns> //////////////////////////////////////////////////////////////////////////////////////////////////// int uniform_distribution(int rangeLow, int rangeHigh) { double myRand = rand()/(1.0 + RAND_MAX); int range = rangeHigh - rangeLow + 1; int myRand_scaled = (myRand * range) + rangeLow; return myRand_scaled; } //////////////////////////////////////////////////////////////////////////////// //Initialize Graph //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Initializes the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> [in,out] The no of nodes. </param> /// <param name="edge_list_size"> [in,out] Size of the edge list. </param> /// <param name="source"> [in,out] Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges) { bool quiet = op.getOptionBool("quiet"); // open input file for reading FILE *fp = NULL; string infile = op.getOptionString("inputFile"); if(infile != "") { fp = fopen(infile.c_str(),"r"); if(!fp && !quiet) { printf("Error: Unable to read graph file %s.\n", infile.c_str()); } } if(!quiet) { if(fp) { printf("Reading graph file\n"); } else { printf("Generating graph with problem size %d\n", (int)op.getOptionInt("size")); } } // initialize number of nodes if(fp) { int n = fscanf(fp,"%d",&no_of_nodes); assert(n == 1); } else { int problemSizes[4] = {10, 50, 200, 400}; no_of_nodes = problemSizes[op.getOptionInt("size") - 1] * 1024 * 1024; } // initalize the nodes & number of edges h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); int start; int edgeno; for( int i = 0; i < no_of_nodes; i++) { if(fp) { int n = fscanf(fp,"%d %d",&start,&edgeno); assert(n == 2); } else { start = edge_list_size; edgeno = rand() % (MAX_INIT_EDGES - MIN_EDGES + 1) + MIN_EDGES; } h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; edge_list_size += edgeno; } // initialize the source node if(fp) { int n = fscanf(fp,"%d",&source); assert(n == 1); } else { source = uniform_distribution(0, no_of_nodes - 1); } source = 0; if(fp) { int edges; int n = fscanf(fp,"%d",&edges); assert(n == 1); assert(edges == edge_list_size); } // initialize the edges int id; int cost; h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); for(int i=0; i < edge_list_size ; i++) { if(fp) { int n = fscanf(fp,"%d %d",&id, &cost); assert(n == 2); } else { id = uniform_distribution(0, no_of_nodes - 1); //cost = rand() % (MAX_WEIGHT - MIN_WEIGHT + 1) + MIN_WEIGHT; } h_graph_edges[i] = id; } if(!quiet) { if(fp) { fclose(fp); printf("Done reading graph file\n"); } else { printf("Done generating graph\n"); } printf("Graph size: %d nodes, %d edges\n", no_of_nodes, edge_list_size); } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Bfs graph using CUDA. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. 
</remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> Transfer time and kernel time. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges) { bool verbose = op.getOptionBool("verbose"); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // allocate host memory bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes); // initalize the memory for( int i = 0; i < no_of_nodes; i++) { h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //set the source node as true in the mask h_graph_mask[source]=true; h_graph_visited[source]=true; // allocate mem for the result on host side int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes); for(int i=0;i<no_of_nodes;i++) { h_cost[i]=-1; } h_cost[source]=0; // node list Node* d_graph_nodes; // edge list int* d_graph_edges; // mask bool* d_graph_mask; bool* d_updating_graph_mask; // visited nodes bool* d_graph_visited; // result int* d_cost; // bool if execution is over bool *d_over; CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size)); CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(hipMalloc( (void**) &d_over, sizeof(bool))); hipError_t err = hipGetLastError(); if(err != hipSuccess) { free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_graph_mask); hipFree(d_updating_graph_mask); hipFree(d_graph_visited); hipFree(d_cost); hipFree(d_over); return FLT_MAX; } hipEvent_t tstart, tstop; hipEventCreate(&tstart); hipEventCreate(&tstop); float elapsedTime; double transferTime = 0.; hipEventRecord(tstart, 0); hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ; hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ; hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; hipMemcpy( d_graph_visited, 
h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ; hipEventRecord(tstop, 0); hipEventSynchronize(tstop); hipEventElapsedTime(&elapsedTime, tstart, tstop); transferTime += elapsedTime * 1.e-3; // convert to seconds // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); double kernelTime = 0; int k=0; bool stop; //Call the Kernel untill all the elements of Frontier are not false do { //if no thread changes this value then the loop stops stop=false; hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ; hipEventRecord(tstart, 0); hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes); hipEventRecord(tstop, 0); hipEventSynchronize(tstop); hipEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR(); // check if kernel execution generated an error hipEventRecord(tstart, 0); hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes); hipEventRecord(tstop, 0); hipEventSynchronize(tstop); hipEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR() hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ; k++; } while(stop); if(verbose) { printf("Kernel Executed %d times\n",k); } // copy result from device to host hipEventRecord(tstart, 0); hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ; hipEventRecord(tstop, 0); hipEventSynchronize(tstop); hipEventElapsedTime(&elapsedTime, tstart, tstop); transferTime += elapsedTime * 1.e-3; // convert to seconds //Store the result into a file string outfile = op.getOptionString("outputFile"); if(outfile != "") { FILE *fpo = fopen(outfile.c_str(),"w"); for(int i=0;i<no_of_nodes;i++) { fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]); } fclose(fpo); } // cleanup memory free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_graph_mask); hipFree(d_updating_graph_mask); hipFree(d_graph_visited); hipFree(d_cost); hipFree(d_over); char tmp[64]; sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size); string atts = string(tmp); resultDB.AddResult("bfs_transfer_time", atts, "sec", transferTime); resultDB.AddResult("bfs_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("bfs_total_time", atts, "sec", transferTime + kernelTime); resultDB.AddResult("bfs_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime); resultDB.AddResult("bfs_rate_edges", atts, "Edges/s", edge_list_size/kernelTime); resultDB.AddResult("bfs_rate_parity", atts, "N", transferTime / kernelTime); resultDB.AddOverall("Time", "sec", kernelTime+transferTime); return transferTime + kernelTime; } #ifdef UNIFIED_MEMORY //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Bfs graph with unified memory using CUDA. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. 
</param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> Kernel time. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges) { bool verbose = op.getOptionBool("verbose"); bool quiet = op.getOptionBool("quiet"); int device = op.getOptionInt("device"); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // copy graph nodes to unified memory Node* graph_nodes; CUDA_SAFE_CALL(hipMallocManaged(&graph_nodes, sizeof(Node)*no_of_nodes)); memcpy(graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes); hipError_t x; x = hipMemPrefetchAsync(graph_nodes, sizeof(Node)*no_of_nodes, device); if(x != hipSuccess) { printf("failed\n"); } //hipMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, hipMemAdviseSetReadMostly, device); //hipMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, hipMemAdviseSetPreferredLocation, device); // copy graph edges to unified memory int* graph_edges; CUDA_SAFE_CALL(hipMallocManaged(&graph_edges, sizeof(int)*edge_list_size)); memcpy(graph_edges, h_graph_edges, sizeof(int)*edge_list_size); x = hipMemPrefetchAsync(graph_edges, sizeof(int)*edge_list_size, device); if(x != hipSuccess) { printf("failed\n"); } hipMemAdvise(graph_edges, sizeof(int)*edge_list_size, hipMemAdviseSetReadMostly, device); hipMemAdvise(graph_edges, sizeof(int)*edge_list_size, hipMemAdviseSetPreferredLocation, device); // allocate and initalize the memory bool* graph_mask; bool* updating_graph_mask; bool* graph_visited; CUDA_SAFE_CALL(hipMallocManaged(&graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL(hipMallocManaged(&updating_graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL(hipMallocManaged(&graph_visited, sizeof(bool)*no_of_nodes)); x = hipMemPrefetchAsync(graph_mask, sizeof(bool)*no_of_nodes, device); if(x != hipSuccess) { printf("failed\n"); } x = hipMemPrefetchAsync(updating_graph_mask, sizeof(bool)*no_of_nodes, device); if(x != hipSuccess) { printf("failed\n"); } x = hipMemPrefetchAsync(graph_visited, sizeof(bool)*no_of_nodes, device); if(x != hipSuccess) { printf("failed\n"); } hipMemAdvise(graph_mask, sizeof(bool)*no_of_nodes, hipMemAdviseSetPreferredLocation, device); hipMemAdvise(updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemAdviseSetPreferredLocation, device); hipMemAdvise(graph_visited, sizeof(bool)*no_of_nodes, hipMemAdviseSetPreferredLocation, device); hipError_t err = hipGetLastError(); for( int i = 0; i < no_of_nodes; i++) { graph_mask[i]=false; updating_graph_mask[i]=false; graph_visited[i]=false; } //set the source node as true in the mask graph_mask[source]=true; graph_visited[source]=true; // allocate and initialize memory for result int* cost; CUDA_SAFE_CALL(hipMallocManaged(&cost, sizeof(int)*no_of_nodes)); if(err != hipSuccess) { hipFree(graph_nodes); hipFree(graph_edges); hipFree(graph_mask); hipFree(updating_graph_mask); hipFree(graph_visited); hipFree(cost); return FLT_MAX; } for(int i=0;i<no_of_nodes;i++) { 
cost[i]=-1; } cost[source]=0; // bool if execution is over bool* over; CUDA_SAFE_CALL(hipMallocManaged(&over, sizeof(bool))); // events for timing hipEvent_t tstart, tstop; hipEventCreate(&tstart); hipEventCreate(&tstop); float elapsedTime; // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); double kernelTime = 0; int k=0; bool stop; //Call the Kernel until all the elements of Frontier are not false do { stop = false; *over = stop; hipEventRecord(tstart, 0); hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, graph_nodes, graph_edges, graph_mask, updating_graph_mask, graph_visited, cost, no_of_nodes); hipEventRecord(tstop, 0); hipEventSynchronize(tstop); hipEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR(); // check if kernel execution generated an error hipEventRecord(tstart, 0); hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, graph_mask, updating_graph_mask, graph_visited, over, no_of_nodes); hipEventRecord(tstop, 0); hipEventSynchronize(tstop); hipEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR() stop = *over; k++; } while(stop); if(verbose && !quiet) { printf("Kernel Time: %f\n", kernelTime); printf("Kernel Executed %d times\n",k); } // cleanup memory hipFree(graph_nodes); hipFree(graph_edges); hipFree(graph_mask); hipFree(updating_graph_mask); hipFree(graph_visited); hipFree(cost); hipFree(over); char tmp[64]; sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size); string atts = string(tmp); resultDB.AddResult("bfs_unifiedmem_total_time", atts, "sec", kernelTime); resultDB.AddResult("bfs_unifiedmem_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime); resultDB.AddResult("bfs_unifiedmem_rate_edges", atts, "Edges/s", edge_list_size/kernelTime); return kernelTime; } #endif
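// --------------------------------------------------------------------------------------------------
// Editorial sketch (not part of the benchmark): the managed-memory idiom used in
// BFSGraphUnifiedMemory above -- allocate with *MallocManaged, optionally give the driver placement
// hints with *MemAdvise, and prefetch the pages to the target device before the first kernel touches
// them. The sketch is written against the CUDA runtime; the hip* calls above are the
// hipify-generated equivalents. All names below are illustrative, not the benchmark's.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void touch(int *p, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) p[i] += 1;
}

int main() {
    const int n = 1 << 20;
    int device = 0;
    cudaGetDevice(&device);

    int *data = nullptr;
    cudaMallocManaged(&data, n * sizeof(int));
    for (int i = 0; i < n; ++i) data[i] = i;          // first touch happens on the host

    // Hints only: the driver may migrate or duplicate pages, but correctness does not depend on it.
    cudaMemAdvise(data, n * sizeof(int), cudaMemAdviseSetPreferredLocation, device);
    cudaMemPrefetchAsync(data, n * sizeof(int), device);

    touch<<<(n + 255) / 256, 256>>>(data, n);
    cudaDeviceSynchronize();

    printf("data[0] = %d\n", data[0]);                // pages migrate back on host access
    cudaFree(data);
    return 0;
}
// --------------------------------------------------------------------------------------------------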
2096e595527c7ce0b78acfe1d1e2475177651afb.cu
/*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Created by Pawan Harish. origin: Rodinia (http://rodinia.cs.virginia.edu/doku.php) ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string> #include <cassert> #include <math.h> #include <cuda.h> #include "cudacommon.h" #include "ResultDatabase.h" #include "OptionParser.h" //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Minimum nodes. (Unused) </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MIN_NODES 20 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Maximum nodes. (Unused) </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MAX_NODES ULONG_MAX //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Minimum edges in the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MIN_EDGES 2 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Maximum Initialize edges in the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MAX_INIT_EDGES 4 // Nodes will have, on average, 2*MAX_INIT_EDGES edges //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Minimum weight of an edge. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MIN_WEIGHT 1 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines Maximum weight of an edge. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MAX_WEIGHT 10 //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A macro that defines seed for random number generator. 
</summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// #define SEED 7 using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> A node structure in the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> //////////////////////////////////////////////////////////////////////////////////////////////////// struct Node { /// <summary> The starting position. </summary> int starting; /// <summary> The number of edges connected to this node. </summary> int no_of_edges; }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Initializes the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> [in,out] The no of nodes. </param> /// <param name="edge_list_size"> [in,out] Size of the edge list. </param> /// <param name="source"> [in,out] Source for the initialization. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges); //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS graph runner. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> A float. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges); #ifdef UNIFIED_MEMORY //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS graph using unified memory. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> A float. 
</returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges); #endif //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS Kernel. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="g_graph_nodes"> [in,out] If non-null, the graph nodes. </param> /// <param name="g_graph_edges"> [in,out] If non-null, the graph edges. </param> /// <param name="g_graph_mask"> [in,out] If non-null, true to graph mask. </param> /// <param name="g_updating_graph_mask"> [in,out] If non-null, true to updating graph mask. </param> /// <param name="g_graph_visited"> [in,out] If non-null, true if graph visited. </param> /// <param name="g_cost"> [in,out] If non-null, the cost. </param> /// <param name="no_of_nodes"> The no of nodes. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes) { int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x; if( tid<no_of_nodes && g_graph_mask[tid]) { g_graph_mask[tid]=false; for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++) { int id = g_graph_edges[i]; if(!g_graph_visited[id]) { g_cost[id]=g_cost[tid]+1; g_updating_graph_mask[id]=true; } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> BFS Kernel 2. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="g_graph_mask"> [in,out] If non-null, true to graph mask. </param> /// <param name="g_updating_graph_mask"> [in,out] If non-null, true to updating graph mask. </param> /// <param name="g_graph_visited"> [in,out] If non-null, true if graph visited. </param> /// <param name="g_over"> [in,out] If non-null, true to over. </param> /// <param name="no_of_nodes"> The no of nodes. 
</param> //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void Kernel2( bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes) { int tid = (blockIdx.x*MAX_THREADS_PER_BLOCK) + threadIdx.x; if( tid<no_of_nodes && g_updating_graph_mask[tid]) { g_graph_mask[tid]=true; g_graph_visited[tid]=true; *g_over=true; g_updating_graph_mask[tid]=false; } } //////////////////////////////////////////////////////////////////////////////// // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the radix sort benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing, results are stored in resultDB // // Programmer: Kyle Spafford // Creation: August 13, 2009 // // Modifications: // // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { printf("Running BFS\n"); int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); // seed random number generator srand(SEED); int no_of_nodes = 0; int edge_list_size = 0; int source = 0; Node* h_graph_nodes; int* h_graph_edges; initGraph(op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges); // atts string for result database char tmp[64]; sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size); string atts = string(tmp); bool quiet = op.getOptionBool("quiet"); int passes = op.getOptionInt("passes"); for(int i = 0; i < passes; i++) { if(!quiet) { printf("Pass %d:\n", i); } #ifdef UNIFIED_MEMORY float timeUM = BFSGraphUnifiedMemory(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges); if(!quiet) { if(timeUM == FLT_MAX) { printf("Executing BFS using unified memory...Error.\n"); } else { printf("Executing BFS using unified memory...Done.\n"); } } //if(time != FLT_MAX && timeUM != FLT_MAX) { if(timeUM != FLT_MAX) { //resultDB.AddResult("bfs_unifiedmem_speedup", atts, "N", time/timeUM); } #else float time = BFSGraph(resultDB, op, no_of_nodes, edge_list_size, source, h_graph_nodes, h_graph_edges); if(!quiet) { if(time == FLT_MAX) { printf("Executing BFS...Error.\n"); } else { printf("Executing BFS...Done.\n"); } } #endif } free( h_graph_nodes); free( h_graph_edges); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Generate Uniform distribution. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="rangeLow"> The range low. </param> /// <param name="rangeHigh"> The range high. </param> /// /// <returns> A scaled random int. 
</returns> //////////////////////////////////////////////////////////////////////////////////////////////////// int uniform_distribution(int rangeLow, int rangeHigh) { double myRand = rand()/(1.0 + RAND_MAX); int range = rangeHigh - rangeLow + 1; int myRand_scaled = (myRand * range) + rangeLow; return myRand_scaled; } //////////////////////////////////////////////////////////////////////////////// //Initialize Graph //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Initializes the graph. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> [in,out] The no of nodes. </param> /// <param name="edge_list_size"> [in,out] Size of the edge list. </param> /// <param name="source"> [in,out] Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> //////////////////////////////////////////////////////////////////////////////////////////////////// void initGraph(OptionParser &op, int &no_of_nodes, int &edge_list_size, int &source, Node* &h_graph_nodes, int* &h_graph_edges) { bool quiet = op.getOptionBool("quiet"); // open input file for reading FILE *fp = NULL; string infile = op.getOptionString("inputFile"); if(infile != "") { fp = fopen(infile.c_str(),"r"); if(!fp && !quiet) { printf("Error: Unable to read graph file %s.\n", infile.c_str()); } } if(!quiet) { if(fp) { printf("Reading graph file\n"); } else { printf("Generating graph with problem size %d\n", (int)op.getOptionInt("size")); } } // initialize number of nodes if(fp) { int n = fscanf(fp,"%d",&no_of_nodes); assert(n == 1); } else { int problemSizes[4] = {10, 50, 200, 400}; no_of_nodes = problemSizes[op.getOptionInt("size") - 1] * 1024 * 1024; } // initalize the nodes & number of edges h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); int start; int edgeno; for( int i = 0; i < no_of_nodes; i++) { if(fp) { int n = fscanf(fp,"%d %d",&start,&edgeno); assert(n == 2); } else { start = edge_list_size; edgeno = rand() % (MAX_INIT_EDGES - MIN_EDGES + 1) + MIN_EDGES; } h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; edge_list_size += edgeno; } // initialize the source node if(fp) { int n = fscanf(fp,"%d",&source); assert(n == 1); } else { source = uniform_distribution(0, no_of_nodes - 1); } source = 0; if(fp) { int edges; int n = fscanf(fp,"%d",&edges); assert(n == 1); assert(edges == edge_list_size); } // initialize the edges int id; int cost; h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); for(int i=0; i < edge_list_size ; i++) { if(fp) { int n = fscanf(fp,"%d %d",&id, &cost); assert(n == 2); } else { id = uniform_distribution(0, no_of_nodes - 1); //cost = rand() % (MAX_WEIGHT - MIN_WEIGHT + 1) + MIN_WEIGHT; } h_graph_edges[i] = id; } if(!quiet) { if(fp) { fclose(fp); printf("Done reading graph file\n"); } else { printf("Done generating graph\n"); } printf("Graph size: %d nodes, %d edges\n", no_of_nodes, edge_list_size); } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Bfs graph using CUDA. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. 
</remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. </param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> Transfer time and kernel time. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraph(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges) { bool verbose = op.getOptionBool("verbose"); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // allocate host memory bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes); // initalize the memory for( int i = 0; i < no_of_nodes; i++) { h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //set the source node as true in the mask h_graph_mask[source]=true; h_graph_visited[source]=true; // allocate mem for the result on host side int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes); for(int i=0;i<no_of_nodes;i++) { h_cost[i]=-1; } h_cost[source]=0; // node list Node* d_graph_nodes; // edge list int* d_graph_edges; // mask bool* d_graph_mask; bool* d_updating_graph_mask; // visited nodes bool* d_graph_visited; // result int* d_cost; // bool if execution is over bool *d_over; CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size)); CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes)); CUDA_SAFE_CALL_NOEXIT(cudaMalloc( (void**) &d_over, sizeof(bool))); cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) { free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_graph_mask); cudaFree(d_updating_graph_mask); cudaFree(d_graph_visited); cudaFree(d_cost); cudaFree(d_over); return FLT_MAX; } cudaEvent_t tstart, tstop; cudaEventCreate(&tstart); cudaEventCreate(&tstop); float elapsedTime; double transferTime = 0.; cudaEventRecord(tstart, 0); cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ; cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ; cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; cudaMemcpy( 
d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ; cudaEventRecord(tstop, 0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&elapsedTime, tstart, tstop); transferTime += elapsedTime * 1.e-3; // convert to seconds // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); double kernelTime = 0; int k=0; bool stop; //Call the Kernel untill all the elements of Frontier are not false do { //if no thread changes this value then the loop stops stop=false; cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ; cudaEventRecord(tstart, 0); Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes); cudaEventRecord(tstop, 0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR(); // check if kernel execution generated an error cudaEventRecord(tstart, 0); Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes); cudaEventRecord(tstop, 0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR() cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ; k++; } while(stop); if(verbose) { printf("Kernel Executed %d times\n",k); } // copy result from device to host cudaEventRecord(tstart, 0); cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ; cudaEventRecord(tstop, 0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&elapsedTime, tstart, tstop); transferTime += elapsedTime * 1.e-3; // convert to seconds //Store the result into a file string outfile = op.getOptionString("outputFile"); if(outfile != "") { FILE *fpo = fopen(outfile.c_str(),"w"); for(int i=0;i<no_of_nodes;i++) { fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]); } fclose(fpo); } // cleanup memory free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_graph_mask); cudaFree(d_updating_graph_mask); cudaFree(d_graph_visited); cudaFree(d_cost); cudaFree(d_over); char tmp[64]; sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size); string atts = string(tmp); resultDB.AddResult("bfs_transfer_time", atts, "sec", transferTime); resultDB.AddResult("bfs_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("bfs_total_time", atts, "sec", transferTime + kernelTime); resultDB.AddResult("bfs_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime); resultDB.AddResult("bfs_rate_edges", atts, "Edges/s", edge_list_size/kernelTime); resultDB.AddResult("bfs_rate_parity", atts, "N", transferTime / kernelTime); resultDB.AddOverall("Time", "sec", kernelTime+transferTime); return transferTime + kernelTime; } #ifdef UNIFIED_MEMORY //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Bfs graph with unified memory using CUDA. </summary> /// /// <remarks> Edward Hu ([email protected]), 5/19/2020. </remarks> /// /// <param name="resultDB"> [in,out] The result database. </param> /// <param name="op"> [in,out] The operation. </param> /// <param name="no_of_nodes"> The no of nodes. </param> /// <param name="edge_list_size"> Size of the edge list. </param> /// <param name="source"> Source for the. 
</param> /// <param name="h_graph_nodes"> [in,out] [in,out] If non-null, the graph nodes. </param> /// <param name="h_graph_edges"> [in,out] [in,out] If non-null, the graph edges. </param> /// /// <returns> Kernel time. </returns> //////////////////////////////////////////////////////////////////////////////////////////////////// float BFSGraphUnifiedMemory(ResultDatabase &resultDB, OptionParser &op, int no_of_nodes, int edge_list_size, int source, Node* &h_graph_nodes, int* &h_graph_edges) { bool verbose = op.getOptionBool("verbose"); bool quiet = op.getOptionBool("quiet"); int device = op.getOptionInt("device"); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } // copy graph nodes to unified memory Node* graph_nodes; CUDA_SAFE_CALL(cudaMallocManaged(&graph_nodes, sizeof(Node)*no_of_nodes)); memcpy(graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes); cudaError_t x; x = cudaMemPrefetchAsync(graph_nodes, sizeof(Node)*no_of_nodes, device); if(x != cudaSuccess) { printf("failed\n"); } //cudaMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, cudaMemAdviseSetReadMostly, device); //cudaMemAdvise(graph_nodes, sizeof(Node)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device); // copy graph edges to unified memory int* graph_edges; CUDA_SAFE_CALL(cudaMallocManaged(&graph_edges, sizeof(int)*edge_list_size)); memcpy(graph_edges, h_graph_edges, sizeof(int)*edge_list_size); x = cudaMemPrefetchAsync(graph_edges, sizeof(int)*edge_list_size, device); if(x != cudaSuccess) { printf("failed\n"); } cudaMemAdvise(graph_edges, sizeof(int)*edge_list_size, cudaMemAdviseSetReadMostly, device); cudaMemAdvise(graph_edges, sizeof(int)*edge_list_size, cudaMemAdviseSetPreferredLocation, device); // allocate and initalize the memory bool* graph_mask; bool* updating_graph_mask; bool* graph_visited; CUDA_SAFE_CALL(cudaMallocManaged(&graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL(cudaMallocManaged(&updating_graph_mask, sizeof(bool)*no_of_nodes)); CUDA_SAFE_CALL(cudaMallocManaged(&graph_visited, sizeof(bool)*no_of_nodes)); x = cudaMemPrefetchAsync(graph_mask, sizeof(bool)*no_of_nodes, device); if(x != cudaSuccess) { printf("failed\n"); } x = cudaMemPrefetchAsync(updating_graph_mask, sizeof(bool)*no_of_nodes, device); if(x != cudaSuccess) { printf("failed\n"); } x = cudaMemPrefetchAsync(graph_visited, sizeof(bool)*no_of_nodes, device); if(x != cudaSuccess) { printf("failed\n"); } cudaMemAdvise(graph_mask, sizeof(bool)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device); cudaMemAdvise(updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device); cudaMemAdvise(graph_visited, sizeof(bool)*no_of_nodes, cudaMemAdviseSetPreferredLocation, device); cudaError_t err = cudaGetLastError(); for( int i = 0; i < no_of_nodes; i++) { graph_mask[i]=false; updating_graph_mask[i]=false; graph_visited[i]=false; } //set the source node as true in the mask graph_mask[source]=true; graph_visited[source]=true; // allocate and initialize memory for result int* cost; CUDA_SAFE_CALL(cudaMallocManaged(&cost, sizeof(int)*no_of_nodes)); if(err != cudaSuccess) { cudaFree(graph_nodes); cudaFree(graph_edges); cudaFree(graph_mask); cudaFree(updating_graph_mask); cudaFree(graph_visited); cudaFree(cost); return FLT_MAX; } 
for(int i=0;i<no_of_nodes;i++) { cost[i]=-1; } cost[source]=0; // bool if execution is over bool* over; CUDA_SAFE_CALL(cudaMallocManaged(&over, sizeof(bool))); // events for timing cudaEvent_t tstart, tstop; cudaEventCreate(&tstart); cudaEventCreate(&tstop); float elapsedTime; // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); double kernelTime = 0; int k=0; bool stop; //Call the Kernel until all the elements of Frontier are not false do { stop = false; *over = stop; cudaEventRecord(tstart, 0); Kernel<<< grid, threads, 0 >>>(graph_nodes, graph_edges, graph_mask, updating_graph_mask, graph_visited, cost, no_of_nodes); cudaEventRecord(tstop, 0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR(); // check if kernel execution generated an error cudaEventRecord(tstart, 0); Kernel2<<< grid, threads, 0 >>>(graph_mask, updating_graph_mask, graph_visited, over, no_of_nodes); cudaEventRecord(tstop, 0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&elapsedTime, tstart, tstop); kernelTime += elapsedTime * 1.e-3; CHECK_CUDA_ERROR() stop = *over; k++; } while(stop); if(verbose && !quiet) { printf("Kernel Time: %f\n", kernelTime); printf("Kernel Executed %d times\n",k); } // cleanup memory cudaFree(graph_nodes); cudaFree(graph_edges); cudaFree(graph_mask); cudaFree(updating_graph_mask); cudaFree(graph_visited); cudaFree(cost); cudaFree(over); char tmp[64]; sprintf(tmp, "%dV,%dE", no_of_nodes, edge_list_size); string atts = string(tmp); resultDB.AddResult("bfs_unifiedmem_total_time", atts, "sec", kernelTime); resultDB.AddResult("bfs_unifiedmem_rate_nodes", atts, "Nodes/s", no_of_nodes/kernelTime); resultDB.AddResult("bfs_unifiedmem_rate_edges", atts, "Edges/s", edge_list_size/kernelTime); return kernelTime; } #endif
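// --------------------------------------------------------------------------------------------------
// Editorial sketch (assumed names, not the benchmark code): the level-synchronous pattern that
// BFSGraph implements above. One kernel expands the current frontier and marks candidate vertices, a
// second kernel promotes unvisited candidates to the next frontier and raises a device-side flag,
// and the host loops until no thread raised the flag. The toy "graph" here is a ring, just to keep
// the sketch self-contained.
#include <cuda_runtime.h>

__global__ void expand(const bool *frontier, bool *candidate, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && frontier[i]) candidate[(i + 1) % n] = true;   // stand-in for visiting i's neighbours
}

__global__ void commit(bool *frontier, bool *candidate, bool *visited, bool *again, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    bool fresh = candidate[i] && !visited[i];
    frontier[i] = fresh;                      // next frontier holds only newly reached vertices
    if (fresh) { visited[i] = true; *again = true; }
    candidate[i] = false;
}

int main() {
    const int n = 1024, threads = 256, blocks = (n + threads - 1) / threads;
    bool *frontier, *candidate, *visited, *again;
    cudaMalloc(&frontier, n);  cudaMalloc(&candidate, n);
    cudaMalloc(&visited, n);   cudaMalloc(&again, 1);
    cudaMemset(frontier, 0, n); cudaMemset(candidate, 0, n); cudaMemset(visited, 0, n);

    const bool t = true;                      // seed the source vertex (vertex 0)
    cudaMemcpy(frontier, &t, 1, cudaMemcpyHostToDevice);
    cudaMemcpy(visited,  &t, 1, cudaMemcpyHostToDevice);

    bool more;
    do {
        more = false;
        cudaMemcpy(again, &more, 1, cudaMemcpyHostToDevice);
        expand<<<blocks, threads>>>(frontier, candidate, n);
        commit<<<blocks, threads>>>(frontier, candidate, visited, again, n);
        cudaMemcpy(&more, again, 1, cudaMemcpyDeviceToHost);
    } while (more);

    cudaFree(frontier); cudaFree(candidate); cudaFree(visited); cudaFree(again);
    return 0;
}
// --------------------------------------------------------------------------------------------------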
de666ee0f7ed7b2790929552f4b558441ca6d808.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #define DATA_ELEM 75 #define DB_ELEM 20 #define DB_SIZE 100 using namespace std; struct Data { unsigned int d1; unsigned int d2; }; union Item { Data data; unsigned long long int raw; }; // TODO, changes only in kernel_write // database has empty(0) and non-empty(!=0) positions, around DB_ELEM/DB_SIZE occupied // change the kernel so each thread writes dataToAdd in an empty spot in database // reordoring of data is permitted // (0 1 0 1 0 ) => (t1:2, t2:3) => (2 1 1 3 0) __global__ void kernel_write(unsigned long long int *data, unsigned long long int *database, int dbElemNum) { int idx = blockIdx.x * blockDim.x + threadIdx.x; long long unsigned dataToAdd = data[idx]; long long unsigned oldData; if(dataToAdd == 0) { return; } // logic using atomicExch to place elements in empty slots, no barriers // TODO while ((oldData = atomicExch(database + idx, dataToAdd))) { atomicExch(database + idx, oldData); idx = (idx + 1) % dbElemNum; } } // do not modify validateDB and main // void validateDB(unsigned long long int *database, int dbSize, int expNZElem) { bool isValid = true; int numNZElem = 0; for(int i = 0; i < DB_SIZE; i++) { Item item; item.raw = database[i]; if(item.raw != 0) { numNZElem++; } cout << item.data.d1 << "-" << item.data.d2; if(item.data.d1 != item.data.d2) { cout << " ERR, "; isValid = false; } else { cout << " OK, "; } } if (!isValid) { cout << endl << "INVALID, corrupt writes" << endl; } else if (expNZElem != numNZElem) { cout << endl << "INVALID, expected database elements " << expNZElem << " but got " << numNZElem << endl; } else { cout << endl << "VALID" << endl; } } int main(void) { long long unsigned *data = 0; hipMallocManaged(&data, DATA_ELEM * sizeof(unsigned long long int)); if (data == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // data to introduce into db for(int i = 0; i < DATA_ELEM; i++) { Item item; item.data.d1 = i + 1; item.data.d2 = i + 1; data[i] = item.raw; } long long unsigned *database = 0; hipMallocManaged(&database, DB_SIZE * sizeof(unsigned long long int)); if (data == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // current db is 25% full, 75% empty for(int i = 0; i < DB_SIZE; i++) { if(i % (DB_SIZE / DB_ELEM) == 0) { Item item; item.data.d1 = 1111; item.data.d2 = 1111; database[i] = item.raw; } } hipLaunchKernelGGL(( kernel_write), dim3(DATA_ELEM), dim3(1), 0, 0, data, database, DB_SIZE); hipDeviceSynchronize(); validateDB(database, DB_SIZE, (DB_ELEM + DATA_ELEM)); hipFree(data); hipFree(database); return 0; }
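// --------------------------------------------------------------------------------------------------
// Editorial sketch (standalone, illustrative names): why Item is a union of Data and a 64-bit
// integer in the exercise above. Packing both 32-bit fields into one unsigned long long lets a
// single 64-bit atomic publish or read the record as a unit, so a checker like validateDB can never
// observe d1 from one record and d2 from another. Written against the CUDA runtime, matching the
// .cu twin below.
#include <cstdio>
#include <cuda_runtime.h>

struct Pair  { unsigned int a, b; };
union  Boxed { Pair p; unsigned long long raw; };

__global__ void publish(unsigned long long *slot) {
    unsigned int v = blockIdx.x + 1;
    Boxed x;
    x.p.a = v;
    x.p.b = v;
    atomicExch(slot, x.raw);         // both halves become visible together
}

int main() {
    unsigned long long *slot;
    cudaMallocManaged(&slot, sizeof(*slot));
    *slot = 0;
    publish<<<32, 1>>>(slot);
    cudaDeviceSynchronize();
    Boxed x;
    x.raw = *slot;
    printf("%u-%u\n", x.p.a, x.p.b);  // always a matching pair, whichever block wins the race
    cudaFree(slot);
    return 0;
}
// --------------------------------------------------------------------------------------------------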
de666ee0f7ed7b2790929552f4b558441ca6d808.cu
#include <iostream> #define DATA_ELEM 75 #define DB_ELEM 20 #define DB_SIZE 100 using namespace std; struct Data { unsigned int d1; unsigned int d2; }; union Item { Data data; unsigned long long int raw; }; // TODO, changes only in kernel_write // database has empty(0) and non-empty(!=0) positions, around DB_ELEM/DB_SIZE occupied // change the kernel so each thread writes dataToAdd in an empty spot in database // reordoring of data is permitted // (0 1 0 1 0 ) => (t1:2, t2:3) => (2 1 1 3 0) __global__ void kernel_write(unsigned long long int *data, unsigned long long int *database, int dbElemNum) { int idx = blockIdx.x * blockDim.x + threadIdx.x; long long unsigned dataToAdd = data[idx]; long long unsigned oldData; if(dataToAdd == 0) { return; } // logic using atomicExch to place elements in empty slots, no barriers // TODO while ((oldData = atomicExch(database + idx, dataToAdd))) { atomicExch(database + idx, oldData); idx = (idx + 1) % dbElemNum; } } // do not modify validateDB and main // void validateDB(unsigned long long int *database, int dbSize, int expNZElem) { bool isValid = true; int numNZElem = 0; for(int i = 0; i < DB_SIZE; i++) { Item item; item.raw = database[i]; if(item.raw != 0) { numNZElem++; } cout << item.data.d1 << "-" << item.data.d2; if(item.data.d1 != item.data.d2) { cout << " ERR, "; isValid = false; } else { cout << " OK, "; } } if (!isValid) { cout << endl << "INVALID, corrupt writes" << endl; } else if (expNZElem != numNZElem) { cout << endl << "INVALID, expected database elements " << expNZElem << " but got " << numNZElem << endl; } else { cout << endl << "VALID" << endl; } } int main(void) { long long unsigned *data = 0; cudaMallocManaged(&data, DATA_ELEM * sizeof(unsigned long long int)); if (data == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // data to introduce into db for(int i = 0; i < DATA_ELEM; i++) { Item item; item.data.d1 = i + 1; item.data.d2 = i + 1; data[i] = item.raw; } long long unsigned *database = 0; cudaMallocManaged(&database, DB_SIZE * sizeof(unsigned long long int)); if (data == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // current db is 25% full, 75% empty for(int i = 0; i < DB_SIZE; i++) { if(i % (DB_SIZE / DB_ELEM) == 0) { Item item; item.data.d1 = 1111; item.data.d2 = 1111; database[i] = item.raw; } } kernel_write<<<DATA_ELEM, 1>>> (data, database, DB_SIZE); cudaDeviceSynchronize(); validateDB(database, DB_SIZE, (DB_ELEM + DATA_ELEM)); cudaFree(data); cudaFree(database); return 0; }
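// --------------------------------------------------------------------------------------------------
// Editorial note (hedged): the exchange-and-restore loop in kernel_write can drop entries under
// contention. If another thread swaps its record into the slot between a thread's first atomicExch
// and its restoring atomicExch, the restore overwrites that record and the overwritten value is
// discarded (the return value of the second atomicExch is never inspected). A common alternative is
// to claim a slot only when it is still empty, e.g. atomicCAS with linear probing, so an occupied
// slot is never rewritten. The sketch below is standalone and uses illustrative names and sizes,
// not the exercise's.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void insert_cas(const unsigned long long *data, unsigned long long *db, int dbSize) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long v = data[idx];
    if (v == 0) return;
    int pos = idx % dbSize;
    // Claim the slot only if it still holds 0; otherwise probe the next one.
    while (atomicCAS(db + pos, 0ULL, v) != 0ULL) {
        pos = (pos + 1) % dbSize;
    }
}

int main() {
    const int nData = 8, nDb = 32;
    unsigned long long *data, *db;
    cudaMallocManaged(&data, nData * sizeof(*data));
    cudaMallocManaged(&db,   nDb   * sizeof(*db));
    for (int i = 0; i < nData; ++i) data[i] = i + 1;
    for (int i = 0; i < nDb;   ++i) db[i] = (i % 4 == 0) ? 1111ULL : 0ULL;   // 25% pre-filled
    insert_cas<<<nData, 1>>>(data, db, nDb);
    cudaDeviceSynchronize();
    int used = 0;
    for (int i = 0; i < nDb; ++i) used += (db[i] != 0);
    printf("occupied slots: %d (expected %d)\n", used, nDb / 4 + nData);
    cudaFree(data);
    cudaFree(db);
    return 0;
}
// --------------------------------------------------------------------------------------------------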
d478570dee7372912e20ebcf0368a6095f04387a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstring> #include <vector> #include <map> #include <set> #include <cassert> #include <string> #include <iomanip> #include "thrust_wrappers.h" #include "utils.h" #include "hashmap_utils.h" #define ptr(a) thrust::raw_pointer_cast((a).data()) #define NO_EDGE 0 #define BLOCKS_NUMBER 256 #define THREADS_PER_BLOCK 256 struct StepPolicy { int node_start; int node_step; int edge_start; int edge_step; }; __device__ void update_max_modularity(int* max_C, float* max_delta_modularity, int new_C, float new_delta_modulatiry) { if (new_delta_modulatiry > *max_delta_modularity || new_delta_modulatiry == *max_delta_modularity && new_C < *max_C) { *max_C = new_C; *max_delta_modularity = new_delta_modulatiry; } } __global__ void compute_move(int n, int* new_C, int* V, int* N, float* W, int* C, int* comm_size, float* k, float* ac, const float weights_sum, int* hash_offset, float* hash_weight, int* hash_comm) { __shared__ int partial_C_max[THREADS_PER_BLOCK]; __shared__ float partial_delta_mod[THREADS_PER_BLOCK]; int tid = threadIdx.x; int step = blockDim.x; for (int i = blockIdx.x; i < n; i += gridDim.x) { int offset = hash_offset[i]; int size = hash_offset[i + 1] - offset; int ci = C[i]; int pos; if (size == 0) { continue; } for (int j = tid + V[i]; j < V[i + 1]; j += blockDim.x) { if (W[j] == NO_EDGE) break; if (N[j] != i) { hashmap_insert(hash_comm, hash_weight, offset, size, C[N[j]], W[j]); } } __syncthreads(); partial_C_max[tid] = n; partial_delta_mod[tid] = -1; for (pos = offset + tid; pos < offset + size; pos += step) { if (hash_comm[pos] == EMPTY_SLOT) continue; int new_C = hash_comm[pos]; float deltaMod = hash_weight[pos] / weights_sum + k[i] * (ac[ci] - k[i] - ac[new_C]) / 2 / weights_sum / weights_sum; if (comm_size[new_C] > 1 || comm_size[ci] > 1 || new_C < ci) { update_max_modularity(&partial_C_max[tid], &partial_delta_mod[tid], new_C, deltaMod); } } __syncthreads(); for (int s = blockDim.x / 2; s > 0 ; s >>= 1) { if (tid < s) { update_max_modularity(&partial_C_max[tid], &partial_delta_mod[tid], partial_C_max[tid + s], partial_delta_mod[tid + s]); } __syncthreads(); } if (tid == 0) { pos = hashmap_find(hash_comm, offset, size, ci); if (partial_delta_mod[0] - hash_weight[pos] / weights_sum > 0) { new_C[i] = partial_C_max[0]; } else { new_C[i] = ci; } } } } __global__ void calculate_modularity(int n, int c, int* V, int* N, float* W, int* C, int* uniqueC, float* ac, const float weights_sum, float* Q) { __shared__ float partials[THREADS_PER_BLOCK]; int tid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int step = blockDim.x * gridDim.x; float a = 0; for (int i = blockIdx.x; i < n; i += gridDim.x) { for (int j = tid + V[i]; j < V[i + 1]; j += bdim) { if (C[N[j]] == C[i]) { a += W[j] / 2 / weights_sum; } } } for (int i = bid * bdim + tid; i < c; i += step) { a -= ac[uniqueC[i]] * ac[uniqueC[i]] / 4 / weights_sum / weights_sum; } partials[tid] = a; __syncthreads(); for (int s = bdim / 2; s > 0; s >>= 1) { if (tid < s) { partials[tid] += partials[tid + s]; } __syncthreads(); } if (tid == 0) { Q[bid] = partials[0]; } } __global__ void initialize_k(int n, const int* V, const float* W, float* k) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int j = V[i]; j < V[i + 1]; ++j) { if (W[j] == NO_EDGE) break; k[i] += W[j]; } } } __global__ void initialize_ac(int n, int* C, float* k, float* ac) { for (int i = blockIdx.x * blockDim.x + 
threadIdx.x; i < n; i += blockDim.x * gridDim.x) { atomicAdd(&ac[C[i]], k[i]); } } void initialize_uniqueC_and_C(int n, const dvi& C, dvi& uniqueC, int& c) { uniqueC = C; thrust_sort(uniqueC); c = thrust_unique(uniqueC); } __global__ void initialize_degree(int n, int* V, float* W, int* degree) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int ctr = 0; for (int j = V[i]; j < V[i + 1]; ++j) { if (W[j] == NO_EDGE) break; ctr++; } degree[i] = ctr; } } __global__ void initialize_comm_size(int n, int* C, int* comm_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { atomicAdd(&comm_size[C[i]], 1); } } __global__ void initialize_comm_degree(int n, int* degree, int* C, int* comDegree) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { atomicAdd(&comDegree[C[i]], degree[i]); } } __global__ void initialize_newID(int n, int* C, int* comm_size, int* newID) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (comm_size[C[i]] != 0) { atomicCAS(&newID[C[i]], 0, 1); } } } __global__ void initialize_comm(int n, int* C, int* comm, int* vertex_start) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int res = atomicSub(&vertex_start[C[i]], 1) - 1; comm[res] = i; } } __global__ void initialize_aggregated_V(int n, int* C, int* newID, int* edge_pos, int* aggregated_V) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { atomicCAS(&aggregated_V[newID[C[i]] + 1], 0, edge_pos[C[i]]); } } __global__ void save_final_communities(int initial_n, int* finalC, int* C, int* newID) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < initial_n; i += blockDim.x * gridDim.x) { finalC[i] = newID[C[finalC[i]]]; } } __global__ void merge_community_fill_hashmap(int n, int* V, int* N, float* W, int* C, int* comm, int* degree, int* newID, int* hash_offset, int* hash_comm, float* hash_weight, bool debug) { for (int idx = blockIdx.x; idx < n; idx += gridDim.x) { int tid = threadIdx.x; int step = blockDim.x; int i = comm[idx]; int new_ci = newID[C[i]]; int offset = hash_offset[new_ci]; int size = hash_offset[new_ci + 1] - offset; if (size == 0) { continue; } if (debug) { assert(size >= degree[i]); } for (int j = tid + V[i]; j < V[i + 1]; j += step) { if (W[j] == NO_EDGE) break; hashmap_insert(hash_comm, hash_weight, offset, size, newID[C[N[j]]], W[j]); } } } __global__ void merge_community_initialize_graph(int* hash_offset, int* hash_comm, float* hash_weight, int aggregated_n, int* aggregated_V, int* aggregated_N, float* aggregated_W) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < aggregated_n; i += blockDim.x * gridDim.x) { int edgeId = aggregated_V[i]; for (int pos = hash_offset[i]; pos < hash_offset[i + 1]; ++pos) { int new_cj = hash_comm[pos]; if (new_cj == EMPTY_SLOT) { continue; } aggregated_N[edgeId] = new_cj; aggregated_W[edgeId] = hash_weight[pos]; edgeId++; } } } int main(int argc, char *argv[]) { bool show_assignment = false; float threshold = 0; std::string matrix_file; bool debug = false; int n; //number vertices int m; //number of edges dvi V; //vertices dvi N; //neighbours dvf W; //weights float weights_sum; //sum of weights dvi C; //current clustering dvi new_C; //temporary array to store new communities dvf k; //sum of vertex's edges dvf ac; //sum of cluster edges int c; //number of communities dvi uniqueC; //list of unique communities ids dvi 
comm_size; //size of ech community dvi degree; //degree of each vertex int initial_n; //number of vertices in the first iteration dvi finalC; //final clustering result float Qp, Qc; //modularity before outermostloop iteration, before and after modularity optimisation respectively hipEvent_t start_time, stop_time; hipEvent_t tmp_start_time, tmp_stop_time; float memory_transfer_time = 0; parse_command_line(show_assignment, threshold, matrix_file, argc, argv, debug); vi stl_V; vi stl_N; vf stl_W; read_graph_from_file(matrix_file, n, m, stl_V, stl_N, stl_W); if (debug) std::cerr << "finished data loading" << std::endl; start_recording_time(tmp_start_time, tmp_stop_time); V = stl_V; N = stl_N; W = stl_W; memory_transfer_time += stop_recording_time(tmp_start_time, tmp_stop_time); thrust_sort_graph(V, N, W); dvi just_ones(m, 1); dvi indices(n); dvi occurences(n); thrust_reduce_by_key(V, just_ones, indices, occurences); start_recording_time(tmp_start_time, tmp_stop_time); hvi host_indices = indices; hvi host_occurences = occurences; memory_transfer_time += stop_recording_time(tmp_start_time, tmp_stop_time); hvi host_V(n + 1); host_V[0] = 0; int ptr = 0; for (int i = 0; i < n; i++) { if (host_indices[ptr] == i) { host_V[i + 1] = host_V[i] + host_occurences[ptr]; ptr++; } else { host_V[i + 1] = host_V[i]; } } start_recording_time(tmp_start_time, tmp_stop_time); V = host_V; memory_transfer_time += stop_recording_time(tmp_start_time, tmp_stop_time); start_recording_time(start_time, stop_time); initial_n = n; weights_sum = thrust_sum(W) / 2; finalC = dvi(n); thrust_sequence(finalC); while (true) { C = dvi(n); thrust_sequence(C); k = vf(n, 0); hipLaunchKernelGGL(( initialize_k), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(V), ptr(W), ptr(k)); if (debug) { float ksum = thrust_sum(k); assert(abs(ksum - 2 * weights_sum) < 0.001 * ksum); } ac = k; //modularity optimisation phase initialize_uniqueC_and_C(n, C, uniqueC, c); dvf dQc(BLOCKS_NUMBER); hipLaunchKernelGGL(( calculate_modularity), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, c, ptr(V), ptr(N), ptr(W), ptr(C), ptr(uniqueC), ptr(ac), weights_sum, ptr(dQc)); Qc = thrust_sum(dQc); if (debug) { std::cerr << "modularity: " << Qc << std::endl; } do { new_C = C; comm_size = dvi(n, 0); hipLaunchKernelGGL(( initialize_comm_size), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(C), ptr(comm_size)); degree = dvi(n, 0); hipLaunchKernelGGL(( initialize_degree), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(V), ptr(W), ptr(degree)); dvi hash_size = dvi(n); thrust_transform_hashmap_size(degree, hash_size, 1.5); dvi hash_offset; dvi hash_comm; dvf hash_weight; hashmap_create(hash_size, hash_offset, hash_comm, hash_weight); hipLaunchKernelGGL(( compute_move), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(new_C), ptr(V), ptr(N), ptr(W), ptr(C), ptr(comm_size), ptr(k), ptr(ac), weights_sum, ptr(hash_offset), ptr(hash_weight), ptr(hash_comm)); C = new_C; ac.assign(n, 0); hipLaunchKernelGGL(( initialize_ac), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(C), ptr(k), ptr(ac)); if (debug) { float acsum = thrust_sum(ac); assert(abs(acsum - 2 * weights_sum) < 0.001 * acsum); } Qp = Qc; initialize_uniqueC_and_C(n, C, uniqueC, c); dvf dQc(BLOCKS_NUMBER); hipLaunchKernelGGL(( calculate_modularity), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, c, ptr(V), ptr(N), ptr(W), ptr(C), ptr(uniqueC), ptr(ac), weights_sum, ptr(dQc)); Qc = thrust_sum(dQc); if (debug) { std::cerr << "modularity: " << Qc << 
std::endl; } } while (Qc - Qp > threshold); //AGGREGATION PHASE degree = dvi(n, 0); hipLaunchKernelGGL(( initialize_degree), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(V), ptr(W), ptr(degree)); dvi comDegree(n, 0); hipLaunchKernelGGL(( initialize_comm_degree), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(degree), ptr(C), ptr(comDegree)); comm_size = dvi(n, 0); hipLaunchKernelGGL(( initialize_comm_size), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(C), ptr(comm_size)); dvi newID(n, 0); hipLaunchKernelGGL(( initialize_newID), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(C), ptr(comm_size), ptr(newID)); thrust_inclusive_scan(newID); thrust_sub_for_each(newID, 1); dvi edge_pos = comDegree; thrust_inclusive_scan(edge_pos); dvi vertex_start = comm_size; thrust_inclusive_scan(vertex_start); dvi comm(n); hipLaunchKernelGGL(( initialize_comm), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(C), ptr(comm), ptr(vertex_start)); int aggregated_n; int aggregated_m; dvi aggregated_V; dvi aggregated_N; dvf aggregated_W; aggregated_n = newID.back() + 1; if (aggregated_n == n) { //nothing changed break; } aggregated_m = edge_pos.back(); aggregated_V = dvi(aggregated_n + 1, 0); hipLaunchKernelGGL(( initialize_aggregated_V), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(C), ptr(newID), ptr(edge_pos), ptr(aggregated_V)); aggregated_N = dvi(aggregated_m, -1); aggregated_W = dvf(aggregated_m, NO_EDGE); dvi hash_size = dvi(aggregated_n); thrust_copy_if_non_zero(comDegree, comm_size, hash_size); dvi hash_offset; dvi hash_comm; dvf hash_weight; thrust_transform_hashmap_size(hash_size, hash_size, 1.5); hashmap_create(hash_size, hash_offset, hash_comm, hash_weight); hipLaunchKernelGGL(( merge_community_fill_hashmap), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, n, ptr(V), ptr(N), ptr(W), ptr(C), ptr(comm), ptr(degree), ptr(newID), ptr(hash_offset), ptr(hash_comm), ptr(hash_weight), debug); hipLaunchKernelGGL(( merge_community_initialize_graph), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, ptr(hash_offset), ptr(hash_comm), ptr(hash_weight), aggregated_n, ptr(aggregated_V), ptr(aggregated_N), ptr(aggregated_W)); hipLaunchKernelGGL(( save_final_communities), dim3(BLOCKS_NUMBER), dim3(THREADS_PER_BLOCK), 0, 0, initial_n, ptr(finalC), ptr(C), ptr(newID)); n = aggregated_n; m = aggregated_m; V = aggregated_V; N = aggregated_N; W = aggregated_W; }; std::cout << std::fixed << Qc << std::endl; float algorithm_time = stop_recording_time(start_time, stop_time); printf("%3.1f %3.1f\n", algorithm_time, memory_transfer_time); if (show_assignment) { hvi host_finalC = finalC; vi stl_finalC(ptr(host_finalC), ptr(host_finalC) + initial_n); print_clustering(initial_n, stl_finalC); } return 0; }
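// --------------------------------------------------------------------------------------------------
// Editorial note (hedged): the score evaluated per candidate community in compute_move above is the
// usual Louvain-style modularity gain for moving vertex i out of its community ci into community C.
// With m = weights_sum (total edge weight), k_i = weighted degree of i, a_x = sum of degrees in
// community x, and e_iC = total weight of edges from i into C, the kernel computes
//
//     gain(i -> C) = e_iC / m + k_i * (a_ci - k_i - a_C) / (2 * m^2)
//
// and accepts the best candidate C* only if gain(i -> C*) - e_i,ci / m > 0, i.e. only if the move
// beats keeping i where it is. A restatement of that expression, with assumed parameter names:
__host__ __device__ inline float louvain_gain(float e_iC, float k_i, float a_ci, float a_C, float m)
{
    return e_iC / m + k_i * (a_ci - k_i - a_C) / (2.0f * m * m);
}
// --------------------------------------------------------------------------------------------------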
d478570dee7372912e20ebcf0368a6095f04387a.cu
#include <iostream> #include <cstring> #include <vector> #include <map> #include <set> #include <cassert> #include <string> #include <iomanip> #include "thrust_wrappers.h" #include "utils.h" #include "hashmap_utils.h" #define ptr(a) thrust::raw_pointer_cast((a).data()) #define NO_EDGE 0 #define BLOCKS_NUMBER 256 #define THREADS_PER_BLOCK 256 struct StepPolicy { int node_start; int node_step; int edge_start; int edge_step; }; __device__ void update_max_modularity(int* max_C, float* max_delta_modularity, int new_C, float new_delta_modulatiry) { if (new_delta_modulatiry > *max_delta_modularity || new_delta_modulatiry == *max_delta_modularity && new_C < *max_C) { *max_C = new_C; *max_delta_modularity = new_delta_modulatiry; } } __global__ void compute_move(int n, int* new_C, int* V, int* N, float* W, int* C, int* comm_size, float* k, float* ac, const float weights_sum, int* hash_offset, float* hash_weight, int* hash_comm) { __shared__ int partial_C_max[THREADS_PER_BLOCK]; __shared__ float partial_delta_mod[THREADS_PER_BLOCK]; int tid = threadIdx.x; int step = blockDim.x; for (int i = blockIdx.x; i < n; i += gridDim.x) { int offset = hash_offset[i]; int size = hash_offset[i + 1] - offset; int ci = C[i]; int pos; if (size == 0) { continue; } for (int j = tid + V[i]; j < V[i + 1]; j += blockDim.x) { if (W[j] == NO_EDGE) break; if (N[j] != i) { hashmap_insert(hash_comm, hash_weight, offset, size, C[N[j]], W[j]); } } __syncthreads(); partial_C_max[tid] = n; partial_delta_mod[tid] = -1; for (pos = offset + tid; pos < offset + size; pos += step) { if (hash_comm[pos] == EMPTY_SLOT) continue; int new_C = hash_comm[pos]; float deltaMod = hash_weight[pos] / weights_sum + k[i] * (ac[ci] - k[i] - ac[new_C]) / 2 / weights_sum / weights_sum; if (comm_size[new_C] > 1 || comm_size[ci] > 1 || new_C < ci) { update_max_modularity(&partial_C_max[tid], &partial_delta_mod[tid], new_C, deltaMod); } } __syncthreads(); for (int s = blockDim.x / 2; s > 0 ; s >>= 1) { if (tid < s) { update_max_modularity(&partial_C_max[tid], &partial_delta_mod[tid], partial_C_max[tid + s], partial_delta_mod[tid + s]); } __syncthreads(); } if (tid == 0) { pos = hashmap_find(hash_comm, offset, size, ci); if (partial_delta_mod[0] - hash_weight[pos] / weights_sum > 0) { new_C[i] = partial_C_max[0]; } else { new_C[i] = ci; } } } } __global__ void calculate_modularity(int n, int c, int* V, int* N, float* W, int* C, int* uniqueC, float* ac, const float weights_sum, float* Q) { __shared__ float partials[THREADS_PER_BLOCK]; int tid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int step = blockDim.x * gridDim.x; float a = 0; for (int i = blockIdx.x; i < n; i += gridDim.x) { for (int j = tid + V[i]; j < V[i + 1]; j += bdim) { if (C[N[j]] == C[i]) { a += W[j] / 2 / weights_sum; } } } for (int i = bid * bdim + tid; i < c; i += step) { a -= ac[uniqueC[i]] * ac[uniqueC[i]] / 4 / weights_sum / weights_sum; } partials[tid] = a; __syncthreads(); for (int s = bdim / 2; s > 0; s >>= 1) { if (tid < s) { partials[tid] += partials[tid + s]; } __syncthreads(); } if (tid == 0) { Q[bid] = partials[0]; } } __global__ void initialize_k(int n, const int* V, const float* W, float* k) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int j = V[i]; j < V[i + 1]; ++j) { if (W[j] == NO_EDGE) break; k[i] += W[j]; } } } __global__ void initialize_ac(int n, int* C, float* k, float* ac) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { atomicAdd(&ac[C[i]], k[i]); } } void 
initialize_uniqueC_and_C(int n, const dvi& C, dvi& uniqueC, int& c) {
    uniqueC = C;
    thrust_sort(uniqueC);
    c = thrust_unique(uniqueC);
}

// Computes each vertex's degree, counting only valid (non-NO_EDGE) entries.
__global__ void initialize_degree(int n, int* V, float* W, int* degree) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        int ctr = 0;
        for (int j = V[i]; j < V[i + 1]; ++j) {
            if (W[j] == NO_EDGE) break;
            ctr++;
        }
        degree[i] = ctr;
    }
}

// Counts the number of vertices assigned to each community.
__global__ void initialize_comm_size(int n, int* C, int* comm_size) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        atomicAdd(&comm_size[C[i]], 1);
    }
}

// Accumulates the total degree of each community.
__global__ void initialize_comm_degree(int n, int* degree, int* C, int* comDegree) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        atomicAdd(&comDegree[C[i]], degree[i]);
    }
}

// Marks non-empty communities; an inclusive scan of this array later yields the new community ids.
__global__ void initialize_newID(int n, int* C, int* comm_size, int* newID) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if (comm_size[C[i]] != 0) {
            atomicCAS(&newID[C[i]], 0, 1);
        }
    }
}

// Groups vertices by community, using vertex_start as a per-community write cursor.
__global__ void initialize_comm(int n, int* C, int* comm, int* vertex_start) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        int res = atomicSub(&vertex_start[C[i]], 1) - 1;
        comm[res] = i;
    }
}

// Fills the CSR row offsets of the aggregated graph from the per-community edge positions.
__global__ void initialize_aggregated_V(int n, int* C, int* newID, int* edge_pos, int* aggregated_V) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        atomicCAS(&aggregated_V[newID[C[i]] + 1], 0, edge_pos[C[i]]);
    }
}

// Remaps the stored assignment of the original vertices to the new community ids.
__global__ void save_final_communities(int initial_n, int* finalC, int* C, int* newID) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < initial_n; i += blockDim.x * gridDim.x) {
        finalC[i] = newID[C[finalC[i]]];
    }
}

// One block per vertex (taken in community order); its threads insert the vertex's edges into the
// hashmap of the vertex's community, keyed by the neighbour's new community id.
__global__ void merge_community_fill_hashmap(int n, int* V, int* N, float* W, int* C, int* comm, int* degree,
                                             int* newID, int* hash_offset, int* hash_comm, float* hash_weight,
                                             bool debug) {
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x) {
        int tid = threadIdx.x;
        int step = blockDim.x;
        int i = comm[idx];
        int new_ci = newID[C[i]];
        int offset = hash_offset[new_ci];
        int size = hash_offset[new_ci + 1] - offset;
        if (size == 0) {
            continue;
        }
        if (debug) {
            assert(size >= degree[i]);
        }
        for (int j = tid + V[i]; j < V[i + 1]; j += step) {
            if (W[j] == NO_EDGE) break;
            hashmap_insert(hash_comm, hash_weight, offset, size, newID[C[N[j]]], W[j]);
        }
    }
}

// Writes the per-community hashmaps out as the CSR neighbour and weight arrays of the aggregated graph.
__global__ void merge_community_initialize_graph(int* hash_offset, int* hash_comm, float* hash_weight,
                                                 int aggregated_n, int* aggregated_V, int* aggregated_N,
                                                 float* aggregated_W) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < aggregated_n; i += blockDim.x * gridDim.x) {
        int edgeId = aggregated_V[i];
        for (int pos = hash_offset[i]; pos < hash_offset[i + 1]; ++pos) {
            int new_cj = hash_comm[pos];
            if (new_cj == EMPTY_SLOT) {
                continue;
            }
            aggregated_N[edgeId] = new_cj;
            aggregated_W[edgeId] = hash_weight[pos];
            edgeId++;
        }
    }
}

int main(int argc, char *argv[]) {
    bool show_assignment = false;
    float threshold = 0;
    std::string matrix_file;
    bool debug = false;

    int n;             //number of vertices
    int m;             //number of edges
    dvi V;             //vertices
    dvi N;             //neighbours
    dvf W;             //weights
    float weights_sum; //sum of weights
    dvi C;             //current clustering
    dvi new_C;         //temporary array to store new communities
    dvf k;             //sum of vertex's edges
    dvf ac;            //sum of cluster edges
    int c;             //number of communities
    dvi uniqueC;       //list of unique communities ids
    dvi comm_size;     //size of each community
    dvi degree;        //degree of each vertex
    int initial_n;     //number of vertices in the first iteration
    dvi finalC;        //final clustering result
    float Qp, Qc;      //modularity before and after a modularity optimisation pass, respectively

    cudaEvent_t start_time, stop_time;
    cudaEvent_t tmp_start_time, tmp_stop_time;
    float memory_transfer_time = 0;

    parse_command_line(show_assignment, threshold, matrix_file, argc, argv, debug);

    vi stl_V;
    vi stl_N;
    vf stl_W;
    read_graph_from_file(matrix_file, n, m, stl_V, stl_N, stl_W);
    if (debug) std::cerr << "finished data loading" << std::endl;

    start_recording_time(tmp_start_time, tmp_stop_time);
    V = stl_V;
    N = stl_N;
    W = stl_W;
    memory_transfer_time += stop_recording_time(tmp_start_time, tmp_stop_time);

    // Build CSR row offsets (V) from the edge list sorted by source vertex.
    thrust_sort_graph(V, N, W);
    dvi just_ones(m, 1);
    dvi indices(n);
    dvi occurences(n);
    thrust_reduce_by_key(V, just_ones, indices, occurences);

    start_recording_time(tmp_start_time, tmp_stop_time);
    hvi host_indices = indices;
    hvi host_occurences = occurences;
    memory_transfer_time += stop_recording_time(tmp_start_time, tmp_stop_time);

    hvi host_V(n + 1);
    host_V[0] = 0;
    int ptr = 0;
    for (int i = 0; i < n; i++) {
        if (host_indices[ptr] == i) {
            host_V[i + 1] = host_V[i] + host_occurences[ptr];
            ptr++;
        } else {
            host_V[i + 1] = host_V[i];
        }
    }

    start_recording_time(tmp_start_time, tmp_stop_time);
    V = host_V;
    memory_transfer_time += stop_recording_time(tmp_start_time, tmp_stop_time);

    start_recording_time(start_time, stop_time);

    initial_n = n;
    weights_sum = thrust_sum(W) / 2;
    finalC = dvi(n);
    thrust_sequence(finalC);

    while (true) {
        C = dvi(n);
        thrust_sequence(C);
        k = vf(n, 0);
        initialize_k<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(V), ptr(W), ptr(k));
        if (debug) {
            float ksum = thrust_sum(k);
            assert(abs(ksum - 2 * weights_sum) < 0.001 * ksum);
        }
        ac = k;

        //modularity optimisation phase
        initialize_uniqueC_and_C(n, C, uniqueC, c);
        dvf dQc(BLOCKS_NUMBER);
        calculate_modularity<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, c, ptr(V), ptr(N), ptr(W), ptr(C),
                                                                   ptr(uniqueC), ptr(ac), weights_sum, ptr(dQc));
        Qc = thrust_sum(dQc);
        if (debug) {
            std::cerr << "modularity: " << Qc << std::endl;
        }

        do {
            new_C = C;
            comm_size = dvi(n, 0);
            initialize_comm_size<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(C), ptr(comm_size));
            degree = dvi(n, 0);
            initialize_degree<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(V), ptr(W), ptr(degree));

            dvi hash_size = dvi(n);
            thrust_transform_hashmap_size(degree, hash_size, 1.5);
            dvi hash_offset;
            dvi hash_comm;
            dvf hash_weight;
            hashmap_create(hash_size, hash_offset, hash_comm, hash_weight);

            compute_move<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(new_C), ptr(V), ptr(N), ptr(W), ptr(C),
                                                               ptr(comm_size), ptr(k), ptr(ac), weights_sum,
                                                               ptr(hash_offset), ptr(hash_weight), ptr(hash_comm));

            C = new_C;
            ac.assign(n, 0);
            initialize_ac<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(C), ptr(k), ptr(ac));
            if (debug) {
                float acsum = thrust_sum(ac);
                assert(abs(acsum - 2 * weights_sum) < 0.001 * acsum);
            }

            Qp = Qc;
            initialize_uniqueC_and_C(n, C, uniqueC, c);
            dvf dQc(BLOCKS_NUMBER);
            calculate_modularity<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, c, ptr(V), ptr(N), ptr(W), ptr(C),
                                                                       ptr(uniqueC), ptr(ac), weights_sum, ptr(dQc));
            Qc = thrust_sum(dQc);
            if (debug) {
                std::cerr << "modularity: " << Qc << std::endl;
            }
        } while (Qc - Qp > threshold);

        //AGGREGATION PHASE
        degree = dvi(n, 0);
        initialize_degree<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(V), ptr(W), ptr(degree));
        dvi comDegree(n, 0);
        initialize_comm_degree<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(degree), ptr(C), ptr(comDegree));
        comm_size = dvi(n, 0);
        initialize_comm_size<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(C), ptr(comm_size));

        dvi newID(n, 0);
        initialize_newID<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(C), ptr(comm_size), ptr(newID));
        thrust_inclusive_scan(newID);
        thrust_sub_for_each(newID, 1);

        dvi edge_pos = comDegree;
        thrust_inclusive_scan(edge_pos);
        dvi vertex_start = comm_size;
        thrust_inclusive_scan(vertex_start);

        dvi comm(n);
        initialize_comm<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(C), ptr(comm), ptr(vertex_start));

        int aggregated_n;
        int aggregated_m;
        dvi aggregated_V;
        dvi aggregated_N;
        dvf aggregated_W;

        aggregated_n = newID.back() + 1;
        if (aggregated_n == n) {
            //nothing changed
            break;
        }
        aggregated_m = edge_pos.back();
        aggregated_V = dvi(aggregated_n + 1, 0);
        initialize_aggregated_V<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(C), ptr(newID), ptr(edge_pos),
                                                                      ptr(aggregated_V));
        aggregated_N = dvi(aggregated_m, -1);
        aggregated_W = dvf(aggregated_m, NO_EDGE);

        dvi hash_size = dvi(aggregated_n);
        thrust_copy_if_non_zero(comDegree, comm_size, hash_size);
        dvi hash_offset;
        dvi hash_comm;
        dvf hash_weight;
        thrust_transform_hashmap_size(hash_size, hash_size, 1.5);
        hashmap_create(hash_size, hash_offset, hash_comm, hash_weight);

        merge_community_fill_hashmap<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(n, ptr(V), ptr(N), ptr(W), ptr(C),
                                                                           ptr(comm), ptr(degree), ptr(newID),
                                                                           ptr(hash_offset), ptr(hash_comm),
                                                                           ptr(hash_weight), debug);
        merge_community_initialize_graph<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(ptr(hash_offset), ptr(hash_comm),
                                                                               ptr(hash_weight), aggregated_n,
                                                                               ptr(aggregated_V), ptr(aggregated_N),
                                                                               ptr(aggregated_W));

        save_final_communities<<<BLOCKS_NUMBER, THREADS_PER_BLOCK>>>(initial_n, ptr(finalC), ptr(C), ptr(newID));

        n = aggregated_n;
        m = aggregated_m;
        V = aggregated_V;
        N = aggregated_N;
        W = aggregated_W;
    }

    std::cout << std::fixed << Qc << std::endl;
    float algorithm_time = stop_recording_time(start_time, stop_time);
    printf("%3.1f %3.1f\n", algorithm_time, memory_transfer_time);

    if (show_assignment) {
        hvi host_finalC = finalC;
        vi stl_finalC(ptr(host_finalC), ptr(host_finalC) + initial_n);
        print_clustering(initial_n, stl_finalC);
    }

    return 0;
}
d5d107e9d8b65727e293692f9307eb81da44bf2b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * calcTimeStep.cu
 *
 * Created on: 3-11-2014
 * Author: Kamil Szewc ([email protected])
 */
#include "../../../sph.h"
#include "../../../hlp.h"
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <iostream>

struct compareVelocity {
    __host__ __device__ bool operator()(const Particle& p1, const Particle & p2) {
        real vel_1 = sqrt(pow2(p1.vel.x) + pow2(p1.vel.y)) + p1.s;
        real vel_2 = sqrt(pow2(p2.vel.x) + pow2(p2.vel.y)) + p2.s;
        return vel_1 < vel_2;
    }
};

struct compareViscosity {
    __host__ __device__ bool operator()(const Particle& p1, const Particle & p2) {
        return p1.mi < p2.mi;
    }
};

static __global__ void setTimeStepAtGPU(Parameters *par, real dt) {
    par->DT = dt;
}

void calcTimeStep(thrust::device_vector<Particle>& p, Parameters *par, Parameters *parHost) {
    if (parHost->T_TIME_STEP == 1) {
        thrust::device_vector<Particle>::iterator iterator = thrust::max_element(p.begin(), p.end(), compareVelocity());
        thrust::host_vector<Particle> pMaxVelocity(iterator, iterator+1);

        iterator = thrust::max_element(p.begin(), p.end(), compareViscosity());
        thrust::host_vector<Particle> pMaxViscosity(iterator, iterator+1);

        real maxVelocity = sqrt(pow2(pMaxVelocity[0].vel.x) + pow2(pMaxVelocity[0].vel.y)) + pMaxVelocity[0].c;
        real maxViscosity = pMaxViscosity[0].mi;

        real timeStepVelocity = 0.04 * 0.25 * parHost->H / maxVelocity;
        real timeStepViscosity = 0.04 * 0.125 * pow2(parHost->H) / maxViscosity;

        if (timeStepVelocity > timeStepViscosity) {
            parHost->DT = timeStepViscosity;
        } else {
            parHost->DT = timeStepVelocity;
        }
        //std::cout << timeStepVelocity << " " << timeStepViscosity << " " << parHost->DT << std::endl;
    }

    hipLaunchKernelGGL(( setTimeStepAtGPU), dim3(1),dim3(1), 0, 0, par, parHost->DT);
}

void calcTimeStep(thrust::device_vector<Particle>& p, Parameters *par, Parameters *parHost, const real value) {
    calcTimeStep(p, par, parHost);
    parHost->DT *= value;
    hipLaunchKernelGGL(( setTimeStepAtGPU), dim3(1),dim3(1), 0, 0, par, parHost->DT);
}
d5d107e9d8b65727e293692f9307eb81da44bf2b.cu
/*
 * calcTimeStep.cu
 *
 * Created on: 3-11-2014
 * Author: Kamil Szewc ([email protected])
 */
#include "../../../sph.h"
#include "../../../hlp.h"
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <iostream>

struct compareVelocity {
    __host__ __device__ bool operator()(const Particle& p1, const Particle & p2) {
        real vel_1 = sqrt(pow2(p1.vel.x) + pow2(p1.vel.y)) + p1.s;
        real vel_2 = sqrt(pow2(p2.vel.x) + pow2(p2.vel.y)) + p2.s;
        return vel_1 < vel_2;
    }
};

struct compareViscosity {
    __host__ __device__ bool operator()(const Particle& p1, const Particle & p2) {
        return p1.mi < p2.mi;
    }
};

static __global__ void setTimeStepAtGPU(Parameters *par, real dt) {
    par->DT = dt;
}

void calcTimeStep(thrust::device_vector<Particle>& p, Parameters *par, Parameters *parHost) {
    if (parHost->T_TIME_STEP == 1) {
        thrust::device_vector<Particle>::iterator iterator = thrust::max_element(p.begin(), p.end(), compareVelocity());
        thrust::host_vector<Particle> pMaxVelocity(iterator, iterator+1);

        iterator = thrust::max_element(p.begin(), p.end(), compareViscosity());
        thrust::host_vector<Particle> pMaxViscosity(iterator, iterator+1);

        real maxVelocity = sqrt(pow2(pMaxVelocity[0].vel.x) + pow2(pMaxVelocity[0].vel.y)) + pMaxVelocity[0].c;
        real maxViscosity = pMaxViscosity[0].mi;

        real timeStepVelocity = 0.04 * 0.25 * parHost->H / maxVelocity;
        real timeStepViscosity = 0.04 * 0.125 * pow2(parHost->H) / maxViscosity;

        if (timeStepVelocity > timeStepViscosity) {
            parHost->DT = timeStepViscosity;
        } else {
            parHost->DT = timeStepVelocity;
        }
        //std::cout << timeStepVelocity << " " << timeStepViscosity << " " << parHost->DT << std::endl;
    }

    setTimeStepAtGPU<<<1,1>>>(par, parHost->DT);
}

void calcTimeStep(thrust::device_vector<Particle>& p, Parameters *par, Parameters *parHost, const real value) {
    calcTimeStep(p, par, parHost);
    parHost->DT *= value;
    setTimeStepAtGPU<<<1,1>>>(par, parHost->DT);
}